Skip to main content

API Reference

Complete quick-reference for the mycontext-ai SDK. For detailed explanations, see the dedicated section for each topic.


Core — mycontext.Context

from mycontext import Context

Constructor

Context(
guidance: Guidance | str | None = None,
directive: Directive | str | None = None,
constraints: Constraints | None = None,
knowledge: str | None = None,
data: dict = {},
metadata: dict = {},
thinking_strategy: str | None = None,
examples: list[dict[str, str]] | None = None,
research_flow: bool = False,
)

Strings passed to guidance or directive are automatically promoted to Guidance(role=...) and Directive(content=...).

Research Flow Fields

| Field | Type | Description |
|---|---|---|
| `research_flow` | `bool` | When True, uses 9-section research-backed assembly. Default False |
| `thinking_strategy` | `str \| None` | Reasoning strategy: step_by_step, multiple_angles, verify, explain_simply, creative |
| `examples` | `list[dict] \| None` | Few-shot pairs `[{"input": "...", "output": "..."}]` — placed in middle zone when research_flow=True |

Class Methods

| Method | Signature | Returns | Description |
|---|---|---|---|
| from_dict | `from_dict(data)` | Context | Deserialize from dict |
| from_json | `from_json(json_str)` | Context | Deserialize from JSON |
| from_skill | `from_skill(path, task?, **params)` | Context | Build from a SKILL.md Agent Skill |

Execution

| Method | Signature | Returns | Description |
|---|---|---|---|
| execute | `execute(provider, **kwargs)` | ProviderResponse | Run with an LLM (synchronous) |
| aexecute | `aexecute(provider, **kwargs)` | Coroutine[ProviderResponse] | Run asynchronously — native async/await |
| assemble_for_model | `assemble_for_model(model, max_tokens?)` | str | Token-budget assembly — tiktoken-accurate, trims to fit |
| to_prompt | `to_prompt(refine=False, provider="openai", model="gpt-4o-mini")` | str | Zero-cost restructuring (refine=False) or LLM-distilled prompt (refine=True) |

ProviderResponse.response — the LLM output text.

Export Formats

| Method | Returns | Description |
|---|---|---|
| `assemble()` | str | Assembled prompt. Uses classic or 9-section ordering based on research_flow |
| `to_messages(user_message?)` | list[dict] | OpenAI-style messages array |
| `to_openai()` | dict | `chat.completions.create()` kwargs |
| `to_anthropic()` | dict | `messages.create()` kwargs |
| `to_google()` | dict | `generate_content()` kwargs |
| `to_langchain()` | dict | LangChain format |
| `to_llamaindex()` | dict | LlamaIndex format |
| `to_crewai()` | dict | CrewAI Agent + Task format |
| `to_autogen()` | dict | AutoGen format |
| `to_json()` | str | JSON string |
| `to_yaml()` | str | YAML string |
| `to_xml()` | str | XML string |
| `to_markdown()` | str | Markdown string |
| `to_dict()` | dict | Python dict |

Foundations — mycontext.foundation

Guidance

from mycontext.foundation import Guidance

Guidance(
role: str, # required
goal: str | None = None,
rules: list[str] = [],
style: str | None = None,
expertise: list[str] | None = None,
)
| Field | Description |
|---|---|
| role | The persona or identity the LLM adopts |
| goal | The objective — what success looks like for this interaction |
| rules | Behavioral rules rendered as a numbered list |
| style | Communication tone and style |
| expertise | Domain expertise areas |

| Method | Returns | Description |
|---|---|---|
| `render()` | str | Rendered guidance string |

Directive

from mycontext.foundation import Directive

Directive(
content: str, # required
priority: int = 5, # 1-10
constraints: list[str] | None = None,
tags: list[str] | None = None,
)
| Method | Returns | Description |
|---|---|---|
| `render()` | str | Rendered directive string |

Constraints

from mycontext.foundation import Constraints

Constraints(
must_include: list[str] | None = None,
must_not_include: list[str] | None = None,
format_rules: list[str] | None = None,
max_length: int | None = None,
language: str | None = None,
output_schema: list[dict] | None = None,
)

output_schema takes a list of {"name": str, "type": str} dicts. In research_flow=True mode, this generates a dedicated ## OUTPUT FORMAT section with a JSON skeleton.

| Method | Returns | Description |
|---|---|---|
| `render()` | str | Rendered constraints string |

Cognitive Patterns — mycontext.templates

All patterns share the same interface:

pattern = PatternName()
ctx: Context = pattern.build_context(**inputs)
result = pattern.execute(provider="openai", **inputs)

Free Patterns (16)

from mycontext.templates.free.reasoning import (
RootCauseAnalyzer, # build_context(problem, depth)
StepByStepReasoner, # build_context(problem, steps)
HypothesisGenerator, # build_context(phenomenon, domain)
)
from mycontext.templates.free.analysis import (
DataAnalyzer, # build_context(data_description, analysis_type)
QuestionAnalyzer, # build_context(question, context)
)
from mycontext.templates.free.creative import (
Brainstormer, # build_context(topic, constraints)
)
from mycontext.templates.free.specialized import (
CodeReviewer, # build_context(code, language, focus)
RiskAssessor, # build_context(decision, depth)
ConflictResolver, # build_context(conflict, parties)
SocraticQuestioner, # build_context(topic, depth)
IntentRecognizer, # build_context(input, context)
)
from mycontext.templates.free.planning import (
ScenarioPlanner, # build_context(situation, horizon)
StakeholderMapper, # build_context(project, objective)
)
from mycontext.templates.free.communication import (
AudienceAdapter, # build_context(content, audience)
TechnicalTranslator, # build_context(technical_text, audience)
)
from mycontext.templates.free.reasoning import (
SynthesisBuilder, # build_context(sources, topic)
)

Enterprise Patterns

import mycontext
mycontext.activate_license("MC-ENT-...")

from mycontext.templates.enterprise.decision import (
DecisionFramework, ComparativeAnalyzer, TradeoffAnalyzer,
CostBenefitAnalyzer, MultiObjectiveOptimizer,
)
from mycontext.templates.enterprise.systems_thinking import (
FeedbackLoopIdentifier, LeveragePointFinder, SystemArchetypeAnalyzer,
)
from mycontext.templates.enterprise.ethical_reasoning import (
EthicalFrameworkAnalyzer, MoralDilemmaResolver,
)
# ... and 60+ more

Intelligence Layer — mycontext.intelligence

Top-level Functions

| Function | Signature | Returns | Description |
|---|---|---|---|
| generate_context | `generate_context(role, goal, task?, provider, model, **kwargs)` | GeneratedContext | LLM generates full context from role + goal |
| transform | `transform(input_text, provider?, model?)` | Context | Auto-select pattern + build context |
| suggest_patterns | `suggest_patterns(text, mode, top_k, include_enterprise)` | SuggestionResult | Pattern recommendations |
| assess_complexity | `assess_complexity(text, provider?)` | ComplexityResult | Template needed? |
| smart_execute | `smart_execute(text, provider, model?)` | ProviderResponse | One-liner intelligent execution |
| smart_prompt | `smart_prompt(text, provider?, model?)` | str | One-liner prompt generation |
| smart_generic_prompt | `smart_generic_prompt(text, provider?, model?)` | str | LLM-optimized generic prompt |
| build_workflow_chain | `build_workflow_chain(goal, provider, model?)` | WorkflowChainResult | LLM-designed multi-pattern chain |
from mycontext.intelligence import (
generate_context, GeneratedContext,
transform, suggest_patterns, assess_complexity,
smart_execute, smart_prompt, smart_generic_prompt,
build_workflow_chain,
)

GeneratedContext

@dataclass
class GeneratedContext:
context: Context # Fully-populated Context(research_flow=True)
generation_meta: dict # Raw spec produced by the LLM

def assemble(self) -> str # Pass-through to context.assemble()
def execute(self, provider, **kwargs) # Pass-through to context.execute()
def to_context(self) -> Context # Return the underlying Context

SuggestionResult

@dataclass
class SuggestionResult:
suggested_patterns: list[PatternSuggestion]
input_type: str
complexity: str
reasoning: str
used_llm: bool

PatternSuggestion

@dataclass
class PatternSuggestion:
name: str
confidence: float
reason: str
category: str
is_enterprise: bool

ComplexityResult

@dataclass
class ComplexityResult:
needs_template: bool
complexity_level: str # "simple" | "moderate" | "complex"
reasoning: str
recommended_pattern: str | None

WorkflowChainResult

@dataclass
class WorkflowChainResult:
steps: list[ChainStep]
total_steps: int
reasoning: str

PromptComposer

from mycontext.intelligence import PromptComposer

composer = PromptComposer(provider="openai", model="gpt-4o-mini")
composed: ComposedPrompt = composer.compose(contexts=[ctx1, ctx2])
composed: ComposedPrompt = composer.compose_from_templates(
templates=[Template1, Template2],
input_data={"key": "value"},
)

TransformationEngine

from mycontext.intelligence import TransformationEngine

engine = TransformationEngine()
ctx = engine.transform(input_text="Analyze X")
analysis: InputAnalysis = engine.analyze_input(input_text)

TemplateIntegratorAgent

from mycontext.intelligence import TemplateIntegratorAgent

agent = TemplateIntegratorAgent(provider="openai")
result: IntegrationResult = agent.integrate(
templates=[T1, T2],
input_data={"key": "value"},
)
result: IntegrationResult = agent.suggest_and_integrate(text="...", input_data={})
composed: ComposedPrompt = agent.suggest_and_compile(text="...")

Quality & Metrics — mycontext.intelligence

QualityMetrics

from mycontext.intelligence import QualityMetrics

metrics = QualityMetrics(mode="heuristic") # or "llm"
score: QualityScore = metrics.evaluate(ctx)
report: str = metrics.report(score)
comparison: dict = metrics.compare(ctx1, ctx2)

QualityScore

score.overall          # float 0-1
score.dimensions # dict[QualityDimension, float]
score.issues # list[str] — improvement suggestions
score.strengths # list[str]

Dimensions: CLARITY, COMPLETENESS, SPECIFICITY, RELEVANCE, STRUCTURE, EFFICIENCY

OutputEvaluator

from mycontext.intelligence import OutputEvaluator

evaluator = OutputEvaluator()
score: OutputQualityScore = evaluator.evaluate(context=ctx, output=response_text)
report: str = evaluator.report(score)

Dimensions: INSTRUCTION_FOLLOWING, REASONING_DEPTH, ACTIONABILITY, STRUCTURE_COMPLIANCE, COGNITIVE_SCAFFOLDING

ContextAmplificationIndex

from mycontext.intelligence import ContextAmplificationIndex

cai = ContextAmplificationIndex()
result: CAIResult = cai.measure(
template=MyPattern,
input_data={"key": "value"},
provider="openai",
)
result.cai_score # float (1.0 = neutral, >1.2 = significant lift)
result.verdict # "significant_improvement" | "moderate_improvement" | ...

TemplateBenchmark

from mycontext.intelligence import TemplateBenchmark

bench = TemplateBenchmark()
result: BenchmarkResult = bench.run(
template=MyPattern,
benchmark_name="my_benchmark",
provider="openai",
)
result.overall_score # float
result.pass_rate # float
result.avg_latency_ms # float

Integrations — mycontext.integrations

Helper Classes

from mycontext.integrations import (
LangChainHelper,
LlamaIndexHelper,
CrewAIHelper,
AutoGenHelper,
DSPyHelper,
SemanticKernelHelper,
GoogleADKHelper,
auto_integrate,
)
| Helper | Key Methods |
|---|---|
| LangChainHelper | `to_chat_prompt()`, `to_lcel_chain()`, `to_langchain_messages()` |
| LlamaIndexHelper | `to_query_engine()`, `to_chat_engine()` |
| CrewAIHelper | `to_agent()`, `to_task()`, `to_crew()` |
| AutoGenHelper | `to_assistant_agent()`, `to_user_proxy()` |
| DSPyHelper | `to_signature()`, `to_program()` |
| SemanticKernelHelper | `to_semantic_function()`, `to_kernel_function()` |
| GoogleADKHelper | `to_agent()`, `to_pipeline()` |

auto_integrate(ctx, framework)

result = auto_integrate(ctx, framework="langchain")
result = auto_integrate(ctx, framework="crewai")
result = auto_integrate(ctx, framework="autogen")

Advanced

Blueprint — mycontext.structure

from mycontext.structure import Blueprint

bp = Blueprint(
name="my_blueprint",
guidance=Guidance(...),
directive_template="Do {task} for {subject}",
constraints=Constraints(...),
token_budget=4000,
optimization="balanced", # "speed" | "quality" | "cost" | "balanced"
)

ctx: Context = bp.build(task="analysis", subject="Q3 data")
bp_optimized: Blueprint = bp.optimize("quality")
tokens: int = bp.estimate_tokens()

Skill + SkillRunner — mycontext.skills

from mycontext.skills.skill import Skill
from mycontext.skills import SkillRunner
from pathlib import Path

skill = Skill.load(Path("./skills/my-skill"))
runner = SkillRunner(log_runs=True)

result: SkillRunResult = runner.run(
skill_path=Path("./skills/my-skill"),
task="Specific task",
execute=True,
provider="openai",
quality_threshold=0.65,
**skill_params,
)

result.context # Context
result.quality_score # QualityScore
result.execution_result # ProviderResponse | None
result.gated # bool

Structured Output — mycontext.utils

from mycontext.utils.structured_output import (
extract_json,
StructuredOutputMixin,
PydanticOutput,
JSONOutput,
output_format,
)
from mycontext.utils.parsers import (
JSONParser,
ListParser,
CodeBlockParser,
MarkdownParser,
parse_json_response,
parse_code_blocks,
parse_list_items,
)

Token Utilities — mycontext.utils.tokens

from mycontext.utils.tokens import (
count_tokens, # count_tokens(text, model) → int
fits_in_window, # fits_in_window(text, model) → bool
token_budget_remaining, # token_budget_remaining(text, model) → int
estimate_cost_usd, # estimate_cost_usd(input_tokens, output_tokens, model) → float
)
| Signature | Returns | Description |
|---|---|---|
| `count_tokens(text, model)` | int | Exact token count via tiktoken, falls back to char estimate |
| `fits_in_window(text, model)` | bool | True if text fits in model's context window |
| `token_budget_remaining(text, model)` | int | Tokens remaining after text for the model's window |
| `estimate_cost_usd(input_tokens, output_tokens, model)` | float | USD cost estimate for a call |

Tracing — mycontext.utils.tracing

from mycontext.utils.tracing import get_tracer, Span, Tracer

tracer = get_tracer() # Module-level singleton

# Read spans after execution
result = ctx.execute(provider="openai")
spans = tracer.get_spans()
span = spans[-1]

span.name # "litellm_generate"
span.metadata # {"model": "gpt-4o-mini", "tokens": 312, "cost_usd": 0.00012, "latency_ms": 1842}
span.duration_ms # float
span.error # str | None

Semantic Cache — mycontext.utils

from mycontext.utils import get_default_cache, reset_default_cache

cache = get_default_cache()
cache.size() # int — current entries
cache.clear() # wipe all entries

# Per-call cache bypass
result = ctx.execute(provider="openai", use_cache=False)

Security — mycontext.utils.template_safety

from mycontext.utils.template_safety import safe_format_template

# Safe substitution — rejects attribute/item access patterns
prompt = safe_format_template("Analyze {topic} for {audience}.", topic="revenue", audience="CFO")

# Raises ValueError for unsafe patterns like {obj.attr} or {obj[key]}

Enterprise License — mycontext

import mycontext

mycontext.activate_license("MC-ENT-...") # Persist key locally
mycontext.deactivate_license() # Remove key
mycontext.is_enterprise_active() # bool

from mycontext.license import get_license_key # Returns key or None

Provider Configuration

# Default provider selection
ctx.execute(provider="openai")
ctx.execute(provider="anthropic")
ctx.execute(provider="gemini")

# With explicit model + API key
ctx.execute(
provider="openai",
model="gpt-4o",
api_key="sk-...",
temperature=0,
max_tokens=4096,
)

Common Import Paths

# Core
from mycontext import Context
from mycontext.foundation import Guidance, Directive, Constraints

# Patterns
from mycontext.templates.free.reasoning import RootCauseAnalyzer
from mycontext.templates.enterprise.decision import DecisionFramework

# Intelligence
from mycontext.intelligence import (
generate_context, GeneratedContext,
transform, suggest_patterns, smart_execute, smart_prompt,
QualityMetrics, OutputEvaluator, ContextAmplificationIndex,
TemplateBenchmark, TemplateIntegratorAgent, PromptComposer,
build_workflow_chain,
)

# Integrations
from mycontext.integrations import LangChainHelper, auto_integrate

# Advanced
from mycontext.structure import Blueprint
from mycontext.skills import SkillRunner
from mycontext.skills.skill import Skill

# Token utilities
from mycontext.utils.tokens import count_tokens, fits_in_window, estimate_cost_usd

# Tracing
from mycontext.utils.tracing import get_tracer

# Cache
from mycontext.utils import get_default_cache

# Security
from mycontext.utils.template_safety import safe_format_template

# Structured output
from mycontext.utils.structured_output import extract_json, PydanticOutput
from mycontext.utils.parsers import JSONParser, ListParser

# License
import mycontext
mycontext.activate_license("MC-ENT-...")