Track Turns
Update chat_loop to maintain conversation history and pass it to build_prompt on every turn
💻
Writing code and entering commands is only available on desktop. Open this page on a larger screen to complete this chapter.
Building up the conversation memory
The messages list is the conversation memory. It starts empty at the beginning of each session. After each exchange, you append two entries: one recording what the user asked and one recording what the assistant answered.
Every new call to build_prompt receives the full messages list so the model sees all prior turns before generating the next answer. The list grows by two entries per turn, accumulating the complete history of the conversation.
Instructions
- Create an empty list called `messages` on the line after `file_list = ...`, before `while True`. This is the conversation memory — it starts empty at the beginning of each session and grows as the user and assistant exchange messages.
- The `build_prompt` function now accepts a history parameter. Update the existing `build_prompt` call to `build_prompt(question, top_chunks, messages, file_list)`, passing `messages` as the third argument between `top_chunks` and `file_list`.
- After `print(f"Assistant: {answer}")`, record what the user asked by appending `{"role": "user", "content": question}` to `messages`. This saves the question so it appears in the next prompt.
- Record what the assistant answered by appending `{"role": "assistant", "content": answer}` to `messages`. Saving both sides of the exchange gives the model the full back-and-forth context on the next question.
import json
import os
import sys
import time
import numpy as np
from dotenv import load_dotenv
from google import genai
from google.genai import types
from files import index_folder
def create_client():
    """Build and return a Gemini API client, reading the key from the environment.

    Loads variables from a local .env file first so GEMINI_API_KEY is available.
    """
    load_dotenv()
    return genai.Client(api_key=os.getenv("GEMINI_API_KEY"))
def embed_text(client, text):
    """Embed a single document string and return its embedding vector."""
    config = types.EmbedContentConfig(task_type="RETRIEVAL_DOCUMENT")
    response = client.models.embed_content(
        model="gemini-embedding-001",
        contents=text,
        config=config,
    )
    return response.embeddings[0].values
def embed_all_chunks(client, texts):
    """Embed every text in order, pausing between batches to respect the rate limit.

    Texts are processed in groups of BATCH_SIZE; after each group that is not
    the last, the function sleeps 60 seconds before continuing.
    """
    BATCH_SIZE = 90
    vectors = []
    for start in range(0, len(texts), BATCH_SIZE):
        vectors.extend(
            embed_text(client, item) for item in texts[start : start + BATCH_SIZE]
        )
        # Only pause when another batch remains.
        if start + BATCH_SIZE < len(texts):
            print("Rate limit pause — waiting 60 seconds...")
            time.sleep(60)
    return vectors
def cosine_similarity(vec_a, vec_b):
    """Return the cosine similarity between two vectors (dot product over norms)."""
    magnitude_product = np.linalg.norm(vec_a) * np.linalg.norm(vec_b)
    return np.dot(vec_a, vec_b) / magnitude_product
def search(client, query, chunks, embeddings, top_k=3):
    """Return the top_k chunks most similar to the query.

    The query is embedded with the RETRIEVAL_QUERY task type, then every stored
    chunk embedding is scored by cosine similarity and the best matches returned.
    """
    config = types.EmbedContentConfig(task_type="RETRIEVAL_QUERY")
    response = client.models.embed_content(
        model="gemini-embedding-001", contents=query, config=config
    )
    query_vector = response.embeddings[0].values
    ranked = sorted(
        ((cosine_similarity(query_vector, emb), chunk)
         for emb, chunk in zip(embeddings, chunks)),
        key=lambda pair: pair[0],
        reverse=True,
    )
    return [chunk for _, chunk in ranked[:top_k]]
def build_prompt(question, context_chunks, history=None, file_list=None):
    """Assemble the model prompt from context chunks, prior turns, and the question.

    history is a list of {"role", "content"} dicts (may be None/empty);
    file_list, when given, is announced in a leading line.
    """
    joined_context = "\n\n".join(c["text"] for c in context_chunks)
    transcript = format_history(history if history else [])
    header = ""
    if file_list:
        header = f"You have access to these files: {', '.join(file_list)}\n"
    pieces = [
        header,
        "You are a helpful assistant. Answer the question using only the context below.\n",
        "If the answer is not in the context, say \"I don't know.\"\n\n",
        f"Context:\n{joined_context}",
        f"{transcript}\n\n",
        f"Question:\n{question}",
    ]
    return "".join(pieces)
def generate_answer(client, prompt):
    """Send the prompt to the chat model and return its text reply."""
    result = client.models.generate_content(
        model="gemini-2.5-flash", contents=prompt
    )
    return result.text
def save_embeddings(chunks, embeddings, cache_path):
    """Write chunks and their embeddings to cache_path as a single JSON object."""
    with open(cache_path, "w") as handle:
        json.dump({"chunks": chunks, "embeddings": embeddings}, handle)
def load_embeddings(cache_path):
    """Read a cache written by save_embeddings.

    Returns (chunks, embeddings), or None when no cache file exists.
    """
    if not os.path.exists(cache_path):
        return None
    with open(cache_path) as handle:
        payload = json.load(handle)
    return payload["chunks"], payload["embeddings"]
def format_history(messages):
    """Render prior turns as a transcript block, or '' when there is no history.

    Each {"role", "content"} entry becomes one "You: ..." or "Assistant: ..." line
    under a "Conversation so far:" heading.
    """
    if not messages:
        return ""
    parts = ["\nConversation so far:"]
    parts.extend(
        f"{'You' if entry['role'] == 'user' else 'Assistant'}: {entry['content']}"
        for entry in messages
    )
    return "\n".join(parts)
def chat_loop(client, chunks, embeddings):
    """Run the interactive Q&A loop, accumulating per-session conversation memory.

    Bug fixed: the old call build_prompt(question, top_chunks, file_list) passed
    file_list as the third positional argument, which is build_prompt's `history`
    parameter — so the file list was rendered as conversation history and the
    real file_list parameter stayed None. The call now passes `messages` as the
    history and `file_list` in its proper slot.
    """
    file_list = sorted(set(chunk["source"] for chunk in chunks))
    # Conversation memory: empty at session start, grows two entries per turn.
    messages = []
    print("Assistant ready. Type your question, or /help for commands.\n")
    while True:
        question = input("You: ").strip()
        if not question:
            continue
        top_chunks = search(client, question, chunks, embeddings)
        # Full history goes in so the model sees every prior turn.
        prompt = build_prompt(question, top_chunks, messages, file_list)
        answer = generate_answer(client, prompt)
        print(f"Assistant: {answer}")
        # Record both sides of the exchange for the next turn's prompt.
        messages.append({"role": "user", "content": question})
        messages.append({"role": "assistant", "content": answer})
def main():
    """CLI entry point: load or build the embedding index for a folder, then chat.

    Usage: python app.py <folder>. A JSON cache named <folder>.cache.json is
    reused when present, otherwise the folder is indexed and the cache written.
    """
    if len(sys.argv) < 2:
        print("Usage: python app.py <folder>")
        sys.exit(1)
    folder = sys.argv[1]
    cache_path = folder.rstrip("/\\") + ".cache.json"
    client = create_client()
    cached = load_embeddings(cache_path)
    if cached is not None:
        chunks, embeddings = cached
        print(f"Loaded cache from {cache_path}")
    else:
        print(f"Indexing {folder}...")
        chunks = index_folder(folder)
        texts = [c["text"] for c in chunks]
        unique_sources = {c["source"] for c in chunks}
        print(f"Indexed {len(chunks)} chunks from {len(unique_sources)} files.")
        embeddings = embed_all_chunks(client, texts)
        save_embeddings(chunks, embeddings, cache_path)
        print(f"Cache saved to {cache_path}")
    chat_loop(client, chunks, embeddings)
# Run the CLI only when executed as a script, not when imported as a module.
if __name__ == "__main__":
    main()
Interactive Code Editor
Sign in to write and run code, track your progress, and unlock all chapters.
Sign In to Start Coding