AgenticMemory
Python API Reference
Complete reference for the agentic_memory Python package (v0.2.0). Install with pip install agentic-brain.
Brain
The primary class for interacting with an AgenticMemory graph. Each Brain instance corresponds to a single .amem file.
Constructor
Brain(path: str | Path)
Opens an existing brain file or creates a new one at the given path. A new session is started automatically on each instantiation.
Parameters:
| Parameter | Type | Description |
|---|---|---|
| path | str \| Path | Path to the .amem file. Created if it does not exist. |
Raises: BrainError if the file exists but is corrupted or has an incompatible version.
Example:
from agentic_memory import Brain
brain = Brain("my_agent.amem")
brain = Brain(Path("/data/agents/assistant.amem"))
add_fact()
Brain.add_fact(
content: str,
confidence: float = 1.0,
metadata: dict[str, str] | None = None
) -> Event
Stores a fact event -- externally observed or received information.
Parameters:
| Parameter | Type | Default | Description |
|---|---|---|---|
| content | str | required | The textual content of the fact. |
| confidence | float | 1.0 | Confidence score between 0.0 and 1.0. |
| metadata | dict[str, str] \| None | None | Optional key-value metadata. |
Returns: Event -- the newly created event with its assigned ID.
add_decision()
Brain.add_decision(
content: str,
confidence: float = 1.0,
metadata: dict[str, str] | None = None
) -> Event
Stores a decision event -- a choice or judgment the agent has made.
Parameters: Same as add_fact().
Returns: Event
add_inference()
Brain.add_inference(
content: str,
confidence: float = 1.0,
metadata: dict[str, str] | None = None
) -> Event
Stores an inference event -- a conclusion derived from existing knowledge.
Parameters: Same as add_fact().
Returns: Event
add_correction()
Brain.add_correction(
content: str,
confidence: float = 1.0,
metadata: dict[str, str] | None = None
) -> Event
Stores a correction event -- an update that revises previous knowledge. Typically followed by a supersedes edge linking the correction to the event it replaces.
Parameters: Same as add_fact().
Returns: Event
add_skill()
Brain.add_skill(
content: str,
confidence: float = 1.0,
metadata: dict[str, str] | None = None
) -> Event
Stores a skill event -- a learned capability or reusable procedure.
Parameters: Same as add_fact().
Returns: Event
add_episode()
Brain.add_episode(
content: str,
confidence: float = 1.0,
metadata: dict[str, str] | None = None
) -> Event
Stores an episode event -- a narrative summary of an interaction or experience.
Parameters: Same as add_fact().
Returns: Event
link()
Brain.link(
source: int,
target: int,
edge_type: str | EdgeType,
weight: float = 1.0
) -> Edge
Creates a directed, weighted edge between two events.
Parameters:
| Parameter | Type | Default | Description |
|---|---|---|---|
| source | int | required | ID of the source event. |
| target | int | required | ID of the target event. |
| edge_type | str \| EdgeType | required | One of: "caused_by", "supports", "contradicts", "supersedes", "related_to", "part_of", "temporal_next". |
| weight | float | 1.0 | Edge weight between 0.0 and 1.0. |
Returns: Edge
Raises: BrainError if either node ID does not exist, or if the edge type is invalid.
facts()
Brain.facts(session: int | None = None) -> list[Event]
Returns all fact events, optionally filtered to a specific session.
Parameters:
| Parameter | Type | Default | Description |
|---|---|---|---|
| session | int \| None | None | If provided, only return facts from this session. |
Returns: list[Event]
decisions()
Brain.decisions(session: int | None = None) -> list[Event]
Returns all decision events, optionally filtered to a specific session.
Parameters: Same as facts().
Returns: list[Event]
traverse()
Brain.traverse(
start: int,
depth: int = 3,
edge_types: list[str | EdgeType] | None = None
) -> TraversalResult
Performs a breadth-first traversal of the graph starting from the given node.
Parameters:
| Parameter | Type | Default | Description |
|---|---|---|---|
| start | int | required | ID of the starting node. |
| depth | int | 3 | Maximum traversal depth. |
| edge_types | list[str \| EdgeType] \| None | None | If provided, only follow edges of these types. |
Returns: TraversalResult
resolve()
Brain.resolve(event_id: int) -> Event
Follows the supersedes chain from the given event to find the most current version. If no supersedes edge exists, returns the original event.
Parameters:
| Parameter | Type | Description |
|---|---|---|
| event_id | int | ID of the event to resolve. |
Returns: Event -- the most current version in the supersedes chain.
impact()
Brain.impact(event_id: int, depth: int = 5) -> ImpactResult
Analyzes the downstream impact of an event by traversing all outgoing edges (reverse direction). Returns all events that depend on, were caused by, or reference the given event.
Parameters:
| Parameter | Type | Default | Description |
|---|---|---|---|
| event_id | int | required | ID of the event to analyze. |
| depth | int | 5 | Maximum traversal depth. |
Returns: ImpactResult
info()
Brain.info() -> BrainInfo
Returns summary information about the brain.
Returns: BrainInfo
session_info()
Brain.session_info(session: int) -> SessionInfo
Returns detailed information about a specific session.
Parameters:
| Parameter | Type | Description |
|---|---|---|
| session | int | The session ID to query. |
Returns: SessionInfo
Raises: BrainError if the session does not exist.
search()
Brain.search(
query: str,
top_k: int = 10,
event_type: str | EventType | None = None,
session: int | None = None,
min_confidence: float = 0.0
) -> list[SearchResult]
Performs semantic similarity search across all events using 128-dimensional feature vectors.
Parameters:
| Parameter | Type | Default | Description |
|---|---|---|---|
| query | str | required | Natural language search query. |
| top_k | int | 10 | Maximum number of results to return. |
| event_type | str \| EventType \| None | None | Filter results to a specific event type. |
| session | int \| None | None | Filter results to a specific session. |
| min_confidence | float | 0.0 | Minimum confidence threshold. |
Returns: list[SearchResult] -- results sorted by descending similarity score.
v0.2 Query Expansion Methods
The following nine methods were added in v0.2.0. All operate through the Rust CLI backend.
search_text()
Brain.search_text(
query: str,
top_k: int = 10,
event_type: str | None = None
) -> list[TextMatch]
BM25 text search over node content. Uses the TermIndex fast path when available (1.58 ms @ 100K nodes), falls back to full-scan slow path on v0.1 files (122 ms @ 100K).
Returns: list[TextMatch] -- results with BM25 scores.
search() (hybrid)
NOTE(review): search() is documented twice with different signatures (compare the semantic-search entry above); presumably the v0.2.0 hybrid signature below supersedes it -- confirm against the changelog.
Brain.search(
query: str,
top_k: int = 10
) -> list[HybridMatch]
Hybrid search combining BM25 and vector similarity via Reciprocal Rank Fusion (RRF). Measured at 10.83 ms on 100K-node graphs.
Returns: list[HybridMatch] -- results with combined, BM25, and vector scores.
centrality()
Brain.centrality(
metric: str = "pagerank",
event_type: str | None = None
) -> list[CentralityResult]
Graph centrality analysis. Supported metrics: "pagerank" (34.3 ms @ 100K), "degree" (20.7 ms @ 100K), "betweenness" (10.1 s @ 100K).
Returns: list[CentralityResult] -- nodes ranked by centrality score.
shortest_path()
Brain.shortest_path(
src: int,
dst: int,
weighted: bool = False
) -> PathResult
Finds the shortest path between two nodes. Uses bidirectional BFS for unweighted graphs (104 µs @ 100K) or Dijkstra for weighted (17.6 ms @ 100K).
Returns: PathResult -- the path as a sequence of node IDs and total cost.
revise()
Brain.revise(node_id: int) -> RevisionReport
Counterfactual belief revision. Computes the downstream cascade if the given node were retracted. Read-only -- no mutations. Measured at 53.4 ms @ 100K nodes.
Returns: RevisionReport -- affected nodes, confidence losses, unsupported inferences.
gaps()
Brain.gaps(
min_severity: float = 0.0,
max_age_days: int = 30
) -> GapReport
Identifies five categories of reasoning weaknesses: unjustified decisions, single-source inferences, low-confidence foundations, unstable knowledge, and stale evidence.
Returns: GapReport -- gaps ordered by severity, with health score.
analogy()
Brain.analogy(
node_id: int,
top_k: int = 5
) -> list[Analogy]
Finds structurally similar past reasoning patterns using structural fingerprints (in-degree, out-degree, edge-type distribution) combined with content similarity.
Returns: list[Analogy] -- top-k analogous nodes with similarity scores.
consolidate()
Brain.consolidate(
dry_run: bool = True
) -> ConsolidationReport
Graph maintenance: deduplication, contradiction linking, inference promotion, orphan detection. Dry-run mode (default) reports proposed changes without modifying the graph.
Returns: ConsolidationReport -- proposed or applied changes.
drift()
Brain.drift(
topic: str | None = None
) -> DriftReport
Tracks belief evolution over time by analyzing supersedes chains and confidence trajectories. Measured at 68.4 ms @ 100K nodes.
Returns: DriftReport -- per-topic stability scores, drift direction, revision frequency.
v0.2 Result Dataclasses
TextMatch
@dataclass
class TextMatch:
node_id: int
content: str
bm25_score: float
event_type: str
HybridMatch
@dataclass
class HybridMatch:
node_id: int
content: str
combined_score: float
bm25_score: float
vector_score: float
CentralityResult
@dataclass
class CentralityResult:
node_id: int
score: float
content: str
PathResult
@dataclass
class PathResult:
path: list[int]
total_cost: float
found: bool
RevisionReport
@dataclass
class RevisionReport:
target_id: int
affected_nodes: list[int]
confidence_losses: dict[int, float]
unsupported: list[int]
total_impact: float
GapReport
@dataclass
class GapReport:
gaps: list[dict]
health_score: float
total_gaps: int
Analogy
@dataclass
class Analogy:
node_id: int
structural_score: float
content_score: float
combined_score: float
ConsolidationReport
@dataclass
class ConsolidationReport:
duplicates: list[tuple[int, int, float]]
contradictions: list[tuple[int, int]]
promotions: list[int]
orphans: list[int]
dry_run: bool
DriftReport
@dataclass
class DriftReport:
topics: list[dict]
overall_stability: float
MemoryAgent
Connects a Brain to an LLM provider for automatic memory extraction from conversations.
Constructor
MemoryAgent(
brain: Brain,
provider: LLMProvider,
auto_link: bool = True,
extraction_prompt: str | None = None
)
Parameters:
| Parameter | Type | Default | Description |
|---|---|---|---|
| brain | Brain | required | The brain to store extracted memories in. |
| provider | LLMProvider | required | An LLM provider instance. |
| auto_link | bool | True | Automatically create edges between extracted events and relevant existing events. |
| extraction_prompt | str \| None | None | Custom system prompt for memory extraction. Uses a sensible default if not provided. |
chat()
MemoryAgent.chat(
message: str,
context: list[Event] | None = None
) -> str
Sends a message to the LLM with relevant memory context, returns the response, and extracts new cognitive events from the conversation.
Parameters:
| Parameter | Type | Default | Description |
|---|---|---|---|
| message | str | required | The user message to process. |
| context | list[Event] \| None | None | Additional events to include in the LLM context. If None, relevant events are retrieved automatically via similarity search. |
Returns: str -- the LLM's response text.
last_extraction
MemoryAgent.last_extraction: list[Event]
A read-only property containing the list of events extracted from the most recent chat() call. Empty if no extraction occurred.
Data Classes
Event
Represents a single cognitive event in the graph.
@dataclass
class Event:
id: int # Unique node ID within the brain
event_type: EventType # Fact, Decision, Inference, Correction, Skill, Episode
content: str # The textual content
session: int # Session ID this event belongs to
confidence: float # Confidence score (0.0 to 1.0)
timestamp: datetime # UTC timestamp of creation
metadata: dict[str, str] # Optional key-value metadata
Edge
Represents a directed, weighted relationship between two events.
@dataclass
class Edge:
source: int # Source node ID
target: int # Target node ID
edge_type: EdgeType # Relationship type
weight: float # Edge weight (0.0 to 1.0)
BrainInfo
Summary information about a brain.
@dataclass
class BrainInfo:
node_count: int # Total number of events
edge_count: int # Total number of edges
session_count: int # Number of sessions
file_size: int # File size in bytes
sessions: list[int] # List of session IDs
version: int # File format version
SessionInfo
Detailed information about a single session.
@dataclass
class SessionInfo:
id: int # Session ID
node_count: int # Number of events in this session
edge_count: int # Number of edges between session events
start_time: datetime # Timestamp of the first event
end_time: datetime # Timestamp of the last event
event_types: dict[str, int] # Count of each event type
TraversalResult
Result of a graph traversal operation.
@dataclass
class TraversalResult:
nodes: list[Event] # All nodes reached during traversal
edges: list[Edge] # All edges traversed
depth_reached: int # Maximum depth actually reached
ImpactResult
Result of an impact analysis.
@dataclass
class ImpactResult:
affected: list[Event] # Events downstream of the analyzed event
edges: list[Edge] # Edges in the impact graph
total_affected: int # Total count of affected events
SearchResult
A single result from a similarity search.
@dataclass
class SearchResult:
event: Event # The matching event
score: float # Similarity score (0.0 to 1.0)
Enums
EventType
class EventType(str, Enum):
FACT = "fact"
DECISION = "decision"
INFERENCE = "inference"
CORRECTION = "correction"
SKILL = "skill"
EPISODE = "episode"
EdgeType
class EdgeType(str, Enum):
CAUSED_BY = "caused_by"
SUPPORTS = "supports"
CONTRADICTS = "contradicts"
SUPERSEDES = "supersedes"
RELATED_TO = "related_to"
PART_OF = "part_of"
TEMPORAL_NEXT = "temporal_next"
Exceptions
BrainError
class BrainError(Exception):
"""Raised for brain file operations: corruption, invalid IDs, I/O failures."""
pass
CLIError
class CLIError(Exception):
"""Raised when the underlying Rust CLI returns an error."""
pass
ProviderError
class ProviderError(Exception):
"""Raised for LLM provider failures: API errors, rate limits, invalid responses."""
pass
LLMProvider (Abstract Base)
Base class for implementing custom LLM providers.
from abc import ABC, abstractmethod
from agentic_memory import Event
class LLMProvider(ABC):
@abstractmethod
def complete(self, prompt: str, system: str | None = None) -> str:
"""Send a prompt to the LLM and return the completion text.
Args:
prompt: The user/input prompt.
system: Optional system prompt.
Returns:
The LLM's response text.
Raises:
ProviderError: If the API call fails.
"""
...
@abstractmethod
def extract_events(self, text: str) -> list[dict]:
"""Extract cognitive events from text.
The LLM should identify facts, decisions, inferences, etc.
in the input text and return them as structured dictionaries.
Args:
text: The text to extract events from.
Returns:
List of dicts with keys: "type", "content", "confidence".
Raises:
ProviderError: If extraction fails.
"""
...
def embed(self, text: str) -> list[float] | None:
"""Generate an embedding vector for the given text.
Optional. If not implemented, the default internal embedding
model is used. Return a list of 128 floats.
Args:
text: The text to embed.
Returns:
A 128-dimensional float vector, or None to use the default.
"""
return None
Implementing a Custom Provider
from agentic_memory.integrations import LLMProvider, ProviderError
class MyCustomProvider(LLMProvider):
def __init__(self, api_url: str, api_key: str):
self.api_url = api_url
self.api_key = api_key
def complete(self, prompt: str, system: str | None = None) -> str:
# Call your LLM API here
response = requests.post(
f"{self.api_url}/completions",
headers={"Authorization": f"Bearer {self.api_key}"},
json={"prompt": prompt, "system": system}
)
if response.status_code != 200:
raise ProviderError(f"API error: {response.status_code}")
return response.json()["text"]
def extract_events(self, text: str) -> list[dict]:
extraction_prompt = f"Extract cognitive events from: {text}"
raw = self.complete(extraction_prompt, system="Extract facts, decisions, inferences...")
# Parse the LLM output into structured events
return parse_extraction(raw)
# Usage
provider = MyCustomProvider("https://my-llm.example.com", "my-api-key")
agent = MemoryAgent(brain, provider)
Built-in Providers
AnthropicProvider
from agentic_memory.integrations import AnthropicProvider
provider = AnthropicProvider(
api_key: str | None = None, # Defaults to ANTHROPIC_API_KEY env var
model: str = "claude-sonnet-4-20250514",
)
OpenAIProvider
from agentic_memory.integrations import OpenAIProvider
provider = OpenAIProvider(
api_key: str | None = None, # Defaults to OPENAI_API_KEY env var
model: str = "gpt-4o",
)
OllamaProvider
from agentic_memory.integrations import OllamaProvider
provider = OllamaProvider(
model: str = "llama3.1",
host: str = "http://localhost:11434",
)