import re
from typing import Any, Dict

import numpy as np
import pandas as pd
from IPython.display import display

def openai_chat(system: str, user: str) -> str:
    # Send the memory context as the user message and return the chat completion text.
    resp = client.chat.completions.create(
        model=OPENAI_MODEL,
        messages=[
            {"role": "system", "content": system},
            {"role": "user", "content": user},
        ],
        temperature=0.3,
    )
    return resp.choices[0].message.content
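# NOTE: `client`, `OPENAI_MODEL`, and `USE_OPENAI` are assumed to come from an earlier
# cell of the notebook. A minimal sketch of that setup (illustrative model name and
# key handling only, not the original cell) could look like:
#
#   import os
#   from openai import OpenAI
#   OPENAI_MODEL = "gpt-4o-mini"
#   USE_OPENAI = bool(os.environ.get("OPENAI_API_KEY"))
#   client = OpenAI(api_key=os.environ["OPENAI_API_KEY"]) if USE_OPENAI else None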
def heuristic_responder(context: str, question: str) -> str:
    # Offline fallback: parse the memory context directly instead of calling an LLM.
    lessons = re.findall(r"Lessons=(.*)", context)
    avoid = re.findall(r"Avoid=(.*)", context)
    ltm_lines = [ln for ln in context.splitlines() if ln.startswith("[LTM:")]
    steps = []
    if lessons:
        for chunk in lessons[:2]:
            for s in [x.strip() for x in chunk.split(";") if x.strip()]:
                steps.append(s)
    for ln in ltm_lines:
        if "[ltm:procedure]" in ln.lower():
            proc = re.sub(r"^\[LTM:procedure\]\s*", "", ln, flags=re.I)
            proc = proc.split("(salience=")[0].strip()
            for part in [p.strip() for p in proc.split("|") if p.strip()]:
                steps.append(part)
    steps = steps[:8] if steps else ["Clarify the target outcome and constraints.", "Use semantic recall + episodic lessons to propose a plan.", "Execute, then store lessons learned."]
    pitfalls = []
    if avoid:
        for chunk in avoid[:2]:
            for s in [x.strip() for x in chunk.split(";") if x.strip()]:
                pitfalls.append(s)
    pitfalls = pitfalls[:6]
    prefs = [ln for ln in ltm_lines if "[ltm:preference]" in ln.lower()]
    facts = [ln for ln in ltm_lines if "[ltm:fact]" in ln.lower() or "[ltm:constraint]" in ln.lower()]
    out = []
    out.append("Answer (memory-informed, offline fallback)\n")
    if prefs:
        out.append("Relevant preferences/constraints remembered:")
        for ln in (prefs + facts)[:6]:
            out.append("  - " + ln.split("] ", 1)[1].split(" (salience=")[0].strip())
        out.append("")
    out.append("Recommended approach:")
    for i, s in enumerate(steps, 1):
        out.append(f"  {i}. {s}")
    if pitfalls:
        out.append("\nPitfalls to avoid (from episodic traces):")
        for p in pitfalls:
            out.append("  - " + p)
    out.append("\n(If you add an API key, the same memory context will feed a stronger LLM for higher-quality responses.)")
    return "\n".join(out).strip()
class MemoryAugmentedAgent:
    def __init__(self, mem: MemoryEngine):
        self.mem = mem

    def answer(self, question: str) -> Dict[str, Any]:
        # Retrieve relevant memories, build a context block, then answer via LLM or offline fallback.
        pack = self.mem.retrieve(question)
        context = self.mem.build_context(question, pack)
        system = (
            "You are a memory-augmented agent. Use the provided memory context.\n"
            "Prioritize:\n"
            "1) Episodic lessons (what worked before)\n"
            "2) Long-term facts/preferences/procedures\n"
            "3) Short-term conversation state\n"
            "Be concrete and stepwise. If memory conflicts, state the uncertainty."
        )
        if USE_OPENAI:
            answer = openai_chat(system=system, user=context + "\n\nUser question:\n" + question)
        else:
            answer = heuristic_responder(context=context, question=question)
        self.mem.st_add("user", question, kind="message")
        self.mem.st_add("assistant", answer, kind="message")
        return {"answer": answer, "pack": pack, "context": context}
mem = MemoryEngine()
agent = MemoryAugmentedAgent(mem)

# Seed long-term memory with durable preferences, a procedure, and a hard constraint.
mem.ltm_add(kind="preference", text="Prefer concise, structured answers with steps and bullet points when helpful.", tags=["style"], pinned=True)
mem.ltm_add(kind="preference", text="Prefer solutions that run on Google Colab without extra setup.", tags=["environment"], pinned=True)
mem.ltm_add(kind="procedure", text="When building agent memory: embed items, store with salience/novelty policy, retrieve with hybrid semantic+episodic, and decay overuse to avoid repetition.", tags=["agent-memory"])
mem.ltm_add(kind="constraint", text="If no API key is available, provide a runnable offline fallback instead of failing.", tags=["robustness"], pinned=True)
# Record a full episodic trace: task, plan, actions, outcome, lessons, and failure modes.
mem.episode_add(
    task="Build an agent memory layer for troubleshooting Python errors in Colab",
    constraints={"offline_ok": True, "single_notebook": True},
    plan=[
        "Capture short-term chat context",
        "Store durable constraints/preferences in long-term vector memory",
        "After solving, extract lessons into episodic traces",
        "On new tasks, retrieve top episodic lessons + semantic facts"
    ],
    actions=[
        {"type": "analysis", "detail": "Identified recurring failure: missing installs and version mismatches."},
        {"type": "action", "detail": "Added pip install block + minimal fallbacks."},
        {"type": "action", "detail": "Added memory policy: pin constraints, drop low-salience items."}
    ],
    outcome="Notebook became robust: runs with or without external keys; troubleshooting quality improved with episodic lessons.",
    outcome_score=0.90,
    lessons=[
        "Always include a pip install cell for non-standard deps.",
        "Pin hard constraints (e.g., offline fallback) into long-term memory.",
        "Store a post-task 'lesson list' as an episodic trace for reuse."
    ],
    failure_modes=[
        "Assuming an API key exists and crashing when absent.",
        "Storing too much noise into long-term memory causing irrelevant recall context."
    ],
    tags=["colab", "robustness", "memory"]
)
print("✅ Reminiscence engine initialized.")
print(f" LTM gadgets: {len(mem.ltm)} | Episodes: {len(mem.episodes)} | ST gadgets: {len(mem.short_term)}")
q1 = "I wish to construct reminiscence for an agent in Colab. What ought to I retailer and the way do I retrieve it?"
out1 = agent.reply(q1)
print("n" + "="*90)
print("Q1 REPLYn")
print(out1["reply"][:1800])
q2 = "How do I keep away from my agent repeating the identical reminiscence time and again?"
out2 = agent.reply(q2)
print("n" + "="*90)
print("Q2 REPLYn")
print(out2["reply"][:1800])
def simple_outcome_eval(text: str) -> float:
    # Crude self-evaluation: score the answer by how many key memory-policy terms it covers.
    hits = 0
    for kw in ["decay", "usage", "penalty", "novelty", "prune", "retrieve", "episodic", "semantic"]:
        if kw in text.lower():
            hits += 1
    return float(np.clip(hits / 8.0, 0.0, 1.0))

score2 = simple_outcome_eval(out2["answer"])
# Store the second interaction as a new episode, scored by the simple evaluator above.
mem.episode_add(
    task="Prevent repetitive recall in a memory-augmented agent",
    constraints={"must_be_simple": True, "runs_in_colab": True},
    plan=[
        "Track usage counts per memory item",
        "Apply usage-based penalty during ranking",
        "Boost novelty during storage to reduce duplicates",
        "Optionally prune low-salience memories"
    ],
    actions=[
        {"type": "design", "detail": "Added usage-based penalty 1/(1+alpha*usage)."},
        {"type": "design", "detail": "Used novelty = 1 - max_similarity at store time."}
    ],
    outcome=out2["answer"][:600],
    outcome_score=score2,
    lessons=[
        "Penalize overused memories during ranking (usage decay).",
        "Enforce novelty threshold at storage time to prevent duplicates.",
        "Keep episodic lessons distilled to avoid bloated recall context."
    ],
    failure_modes=[
        "No usage tracking, causing one high-similarity memory to dominate forever.",
        "Storing raw chat logs as LTM instead of distilled summaries."
    ],
    tags=["ranking", "decay", "policy"]
)
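# The two design choices recorded in this episode (a usage-based ranking penalty and a
# novelty check at store time) can be expressed as small standalone formulas. This is an
# illustrative sketch only; MemoryEngine's actual scoring was defined in an earlier cell.
def usage_penalty(similarity: float, usage: int, alpha: float = 0.3) -> float:
    # Rank score decays as an item is reused: similarity * 1 / (1 + alpha * usage).
    return similarity / (1.0 + alpha * usage)

def novelty_score(new_emb: np.ndarray, existing_embs: list) -> float:
    # Novelty = 1 - max cosine similarity against items already stored.
    if not existing_embs:
        return 1.0
    sims = [float(np.dot(new_emb, e) / ((np.linalg.norm(new_emb) * np.linalg.norm(e)) + 1e-9))
            for e in existing_embs]
    return 1.0 - max(sims)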
cons = mem.consolidate()
print("\n" + "="*90)
print("CONSOLIDATION RESULT:", cons)

print("\n" + "="*90)
print("LTM (top rows):")
display(mem.ltm_df().head(12))

print("\n" + "="*90)
print("EPISODES (top rows):")
display(mem.episodes_df().head(12))
def debug_retrieval(query: str):
    # Inspect what the retriever returns for a query: the context string plus scored hits.
    pack = mem.retrieve(query)
    ctx = mem.build_context(query, pack)
    sem = []
    for mid, sc in pack["semantic_scored"]:
        it = mem.ltm[mid]
        sem.append({"mem_id": mid, "score": sc, "kind": it.kind, "salience": it.salience, "usage": it.usage, "text": it.text[:160]})
    ep = []
    for eid, sc in pack["episodic_scored"]:
        e = mem.episodes[eid]
        ep.append({"ep_id": eid, "score": sc, "outcome": e.outcome_score, "task": e.task[:140], "lessons": " . ".join(e.lessons[:4])})
    return ctx, pd.DataFrame(sem), pd.DataFrame(ep)

print("\n" + "="*90)
ctx, sem_df, ep_df = debug_retrieval("How do I design an agent memory policy for storage and retrieval?")
print(ctx[:1600])
print("\nTop semantic hits:")
display(sem_df)
print("\nTop episodic hits:")
display(ep_df)

print("\n✅ Done. You now have working short-term, long-term vector, and episodic memory with storage/retrieval policies in a single Colab snippet.")