Add Phases 3-5: State transformation, OccasionLogger, OccasionManager

Phase 3 - State Transformation:
- transform_state() function with alpha/beta parameters
- compute_adaptive_params() for dynamic transformation
- StateTransformer class for state management

Phase 4 - Occasion Logger:
- OccasionLog dataclass for structured logging
- OccasionLogger for JSON file storage
- Profile evolution tracking and statistics

Phase 5 - Occasion Manager:
- Full cycle: Prehension → Concrescence → Satisfaction
- Search integration (thoughts, library)
- State creation and logging orchestration

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
This commit is contained in:
2026-01-31 17:09:36 +01:00
parent 21f5676c7b
commit 6af52866ed
7 changed files with 1489 additions and 1 deletions

View File

@@ -5,7 +5,54 @@ Ce module implémente l'architecture processuelle d'Ikario basée sur:
- La Process Philosophy de Whitehead
- Le State Vector comme identité émergente
- Le cycle d'occasion (Prehension → Concrescence → Satisfaction)
Modules:
- state_vector: Gestion du vecteur d'état et collection Weaviate
- projection_directions: Directions interprétables dans l'espace latent
- state_transformation: Fonction de transition S(t-1) → S(t)
- occasion_logger: Logging des occasions d'expérience
- occasion_manager: Orchestrateur du cycle d'occasion
"""
# BUG FIX: the previous revision assigned __version__ twice ("0.1.0" then
# "0.2.0"); the first assignment was dead code and has been removed.
__version__ = "0.2.0"
__author__ = "David (parostagore)"

# Main public exports
from .state_vector import (
    create_state_vector_collection,
    get_current_state_id,
    get_state_vector,
)
from .state_transformation import (
    transform_state,
    compute_adaptive_params,
    StateTransformer,
)
from .occasion_logger import (
    OccasionLog,
    OccasionLogger,
)
from .occasion_manager import (
    OccasionManager,
    get_state_profile,
)

__all__ = [
    # state_vector
    "create_state_vector_collection",
    "get_current_state_id",
    "get_state_vector",
    # state_transformation
    "transform_state",
    "compute_adaptive_params",
    "StateTransformer",
    # occasion_logger
    "OccasionLog",
    "OccasionLogger",
    # occasion_manager
    "OccasionManager",
    "get_state_profile",
]

View File

@@ -0,0 +1,247 @@
#!/usr/bin/env python3
"""
OccasionLogger - Logging des occasions d'expérience.
Chaque occasion est loggée avec:
- Trigger (type, contenu)
- Préhension (pensées/docs récupérés)
- Concrescence (réponse, outils utilisés)
- Satisfaction (nouvel état, paramètres)
- Profils avant/après
Les logs sont stockés en JSON pour analyse et debugging.
"""
import json
from dataclasses import dataclass, asdict, field
from datetime import datetime
from pathlib import Path
from typing import List, Optional, Dict, Any
@dataclass
class OccasionLog:
    """Structured record of one occasion of experience.

    Fields are grouped by phase of the occasion cycle (trigger,
    prehension, concrescence, satisfaction), plus before/after state
    profiles and timing metrics.

    BUG FIX: in the original declaration order, fields WITHOUT defaults
    (``response_summary``, ``new_state_id``, …) followed fields WITH
    defaults (``prehended_thoughts``), which makes ``@dataclass`` raise
    ``TypeError`` at class-creation time. All defaulted fields are now
    declared last. Every call site in this package constructs the class
    with keyword arguments, so the reordering is backward-compatible.
    """
    # --- Identifiers ---
    occasion_id: int
    timestamp: str  # ISO-8601 timestamp string
    # --- Trigger ---
    trigger_type: str  # "user", "timer", "event"
    trigger_content: str
    # --- Prehension (context retrieval) ---
    previous_state_id: int
    prehended_thoughts_count: int
    prehended_docs_count: int
    # --- Concrescence (response generation) ---
    response_summary: str
    # --- Satisfaction (new state) ---
    new_state_id: int
    alpha_used: float  # inertia coefficient actually applied
    beta_used: float   # novelty coefficient actually applied
    # --- Defaulted fields (must come after all required fields) ---
    prehended_thoughts: List[str] = field(default_factory=list)  # thought summaries
    new_thoughts: List[str] = field(default_factory=list)
    tools_used: List[str] = field(default_factory=list)
    # Profiles: {category: {component: value}}
    profile_before: Dict[str, Dict[str, float]] = field(default_factory=dict)
    profile_after: Dict[str, Dict[str, float]] = field(default_factory=dict)
    # --- Metrics ---
    processing_time_ms: int = 0
    token_count: Optional[int] = None
class OccasionLogger:
    """Persists occasions as one JSON file each.

    Files are named ``occasion_<id:06d>.json`` inside ``log_dir``; the
    zero-padded id makes lexicographic filename order equal to id order,
    which the retrieval helpers below rely on.
    """

    def __init__(self, log_dir: str = "logs/occasions"):
        """
        Args:
            log_dir: Directory where logs are stored (created if missing).
        """
        self.log_dir = Path(log_dir)
        self.log_dir.mkdir(parents=True, exist_ok=True)

    def log(self, occasion: OccasionLog) -> Path:
        """
        Write one occasion to disk as JSON.

        Args:
            occasion: OccasionLog to persist

        Returns:
            Path of the file created
        """
        filename = f"occasion_{occasion.occasion_id:06d}.json"
        filepath = self.log_dir / filename
        with open(filepath, 'w', encoding='utf-8') as f:
            json.dump(asdict(occasion), f, indent=2, ensure_ascii=False)
        # BUG FIX: the original message concatenated the id and the path
        # with no separator ("Occasion 42logs/occasions/...").
        print(f"[OccasionLogger] Occasion {occasion.occasion_id} → {filepath}")
        return filepath

    def get_occasion(self, occasion_id: int) -> Optional[OccasionLog]:
        """
        Retrieve one occasion by its ID.

        Args:
            occasion_id: ID of the occasion

        Returns:
            OccasionLog, or None when no matching file exists
        """
        filename = f"occasion_{occasion_id:06d}.json"
        filepath = self.log_dir / filename
        if not filepath.exists():
            return None
        with open(filepath, 'r', encoding='utf-8') as f:
            data = json.load(f)
        return OccasionLog(**data)

    def get_recent_occasions(self, limit: int = 10) -> List[OccasionLog]:
        """
        Retrieve the N most recent occasions.

        Args:
            limit: Maximum number of occasions to return

        Returns:
            List of occasions, most recent first
        """
        # Reverse-sorted filenames == descending occasion ids (zero-padded names)
        files = sorted(self.log_dir.glob("occasion_*.json"), reverse=True)
        occasions = []
        for f in files[:limit]:
            with open(f, 'r', encoding='utf-8') as fp:
                data = json.load(fp)
            occasions.append(OccasionLog(**data))
        return occasions

    def get_last_occasion_id(self) -> int:
        """Return the ID of the most recent occasion (-1 when none exist)."""
        files = sorted(self.log_dir.glob("occasion_*.json"), reverse=True)
        if not files:
            return -1
        # Extract the ID from the filename stem, e.g. "occasion_000042"
        filename = files[0].stem
        try:
            return int(filename.split('_')[1])
        except (IndexError, ValueError):
            # Unexpected filename in the directory — treat as "no occasions"
            return -1

    def get_profile_evolution(
        self,
        component: str,
        last_n: int = 20
    ) -> List[tuple]:
        """
        Trace one profile component over the N most recent occasions.

        Args:
            component: Component name (e.g. "curiosity")
            last_n: Number of occasions to consider

        Returns:
            List of (occasion_id, value) tuples in chronological order;
            occasions whose after-profile lacks the component are skipped.
        """
        occasions = self.get_recent_occasions(last_n)
        evolution = []
        for occ in reversed(occasions):  # chronological order
            # The component may live under any category of the after-profile
            for category, comps in occ.profile_after.items():
                if component in comps:
                    evolution.append((occ.occasion_id, comps[component]))
                    break
        return evolution

    def get_statistics(self, last_n: int = 100) -> Dict[str, Any]:
        """
        Compute aggregate statistics over recent occasions.

        Args:
            last_n: Number of occasions to analyze

        Returns:
            Statistics dictionary; ``{"count": 0}`` when no occasions exist.
        """
        occasions = self.get_recent_occasions(last_n)
        if not occasions:
            return {"count": 0}
        # Basic per-occasion series
        processing_times = [o.processing_time_ms for o in occasions]
        thoughts_created = [len(o.new_thoughts) for o in occasions]
        tools_counts = [len(o.tools_used) for o in occasions]
        # Trigger-type distribution
        trigger_types = {}
        for o in occasions:
            trigger_types[o.trigger_type] = trigger_types.get(o.trigger_type, 0) + 1
        return {
            "count": len(occasions),
            "processing_time": {
                "avg_ms": sum(processing_times) / len(processing_times),
                "min_ms": min(processing_times),
                "max_ms": max(processing_times),
            },
            "thoughts_created": {
                "total": sum(thoughts_created),
                "avg_per_occasion": sum(thoughts_created) / len(thoughts_created),
            },
            "tools": {
                "avg_per_occasion": sum(tools_counts) / len(tools_counts),
            },
            "trigger_distribution": trigger_types,
        }
# Manual smoke test: write one occasion, read it back, print statistics.
if __name__ == "__main__":
    logger = OccasionLogger("tests/temp_logs")
    # Build a sample occasion covering every phase of the cycle
    occasion = OccasionLog(
        occasion_id=1,
        timestamp=datetime.now().isoformat(),
        trigger_type="user",
        trigger_content="Test question sur Whitehead",
        previous_state_id=0,
        prehended_thoughts_count=5,
        prehended_docs_count=2,
        prehended_thoughts=["Pensée 1", "Pensée 2"],
        response_summary="Réponse détaillée sur le processus...",
        new_thoughts=["Nouvelle insight sur le devenir"],
        tools_used=["search_thoughts", "search_library"],
        new_state_id=1,
        alpha_used=0.85,
        beta_used=0.15,
        profile_before={"epistemic": {"curiosity": 0.5, "certainty": 0.3}},
        profile_after={"epistemic": {"curiosity": 0.55, "certainty": 0.32}},
        processing_time_ms=1500
    )
    # Persist it
    filepath = logger.log(occasion)
    print(f"Logged to: {filepath}")
    # Read it back
    loaded = logger.get_occasion(1)
    print(f"Loaded: trigger_type={loaded.trigger_type}, new_state_id={loaded.new_state_id}")
    # Aggregate statistics
    print(f"Stats: {logger.get_statistics()}")

View File

@@ -0,0 +1,414 @@
#!/usr/bin/env python3
"""
OccasionManager - Orchestrateur du cycle d'occasion Ikario.
Gère le cycle complet:
Préhension → Concrescence → Satisfaction
Ce module coordonne:
- La récupération du contexte (pensées, documents)
- L'appel au LLM pour générer la réponse
- La création du nouvel état
- Le logging de l'occasion
"""
import os
import time
from datetime import datetime
from typing import Dict, Any, List, Optional
import numpy as np
import requests
from .state_transformation import StateTransformer, compute_adaptive_params
from .occasion_logger import OccasionLogger, OccasionLog
WEAVIATE_URL = os.getenv("WEAVIATE_URL", "http://localhost:8080")
def get_state_profile(state_id: int) -> Dict[str, Dict[str, float]]:
    """
    Compute the profile of a state: its dot-product projections onto
    every stored ProjectionDirection, grouped by category.

    Args:
        state_id: ID of the state

    Returns:
        {category: {component_name: projection}}; empty dict when the
        state or the directions cannot be fetched from Weaviate.
    """
    # Fetch the state vector
    state_query = {
        "query": """
        {
          Get {
            StateVector(where: {
              path: ["state_id"],
              operator: Equal,
              valueInt: %d
            }) {
              _additional { vector }
            }
          }
        }
        """ % state_id
    }
    # ROBUSTNESS FIX: timeout added so an unreachable Weaviate cannot
    # hang the caller forever (requests blocks indefinitely by default).
    response = requests.post(
        f"{WEAVIATE_URL}/v1/graphql",
        json=state_query,
        headers={"Content-Type": "application/json"},
        timeout=10
    )
    if response.status_code != 200:
        return {}
    data = response.json()
    states = data.get("data", {}).get("Get", {}).get("StateVector", [])
    if not states:
        return {}
    state_vector = np.array(states[0]["_additional"]["vector"])

    # Fetch every projection direction
    dir_query = {
        "query": """
        {
          Get {
            ProjectionDirection {
              name
              category
              _additional { vector }
            }
          }
        }
        """
    }
    response = requests.post(
        f"{WEAVIATE_URL}/v1/graphql",
        json=dir_query,
        headers={"Content-Type": "application/json"},
        timeout=10
    )
    if response.status_code != 200:
        return {}
    data = response.json()
    directions = data.get("data", {}).get("Get", {}).get("ProjectionDirection", [])

    # Project the state onto each direction (dot product; both vectors
    # are assumed normalized upstream)
    profile = {}
    for d in directions:
        direction_vector = np.array(d["_additional"]["vector"])
        projection = float(np.dot(state_vector, direction_vector))
        category = d.get("category", "unknown")
        if category not in profile:
            profile[category] = {}
        profile[category][d["name"]] = round(projection, 4)
    return profile
class OccasionManager:
    """
    Orchestrator of the Ikario occasion cycle.

    Runs Prehension → Concrescence → Satisfaction: retrieves context
    from Weaviate, generates a response (simulated until Phase 6),
    creates the new state via StateTransformer, and logs everything
    through OccasionLogger.
    """

    def __init__(
        self,
        log_dir: str = "logs/occasions",
        embedding_model=None
    ):
        """
        Args:
            log_dir: Directory for occasion logs
            embedding_model: SentenceTransformer model (optional; lazily
                loaded by StateTransformer when None)
        """
        self.transformer = StateTransformer(embedding_model)
        self.logger = OccasionLogger(log_dir)
        # Resume numbering from the last persisted occasion
        self.current_occasion_id = self.logger.get_last_occasion_id() + 1

    def run_occasion(self, trigger: Dict[str, Any]) -> Dict[str, Any]:
        """
        Run one complete occasion cycle.

        Args:
            trigger: {
                "type": "user" | "timer" | "event",
                "content": str,
                "metadata": dict (optional)
            }

        Returns:
            {
                "occasion_id": int,
                "response": str,
                "new_state_id": int,
                "profile": dict,
                "processing_time_ms": int
            }
        """
        start_time = time.time()
        occasion_id = self.current_occasion_id
        self.current_occasion_id += 1
        print(f"\n[OccasionManager] === Occasion {occasion_id} ===")
        print(f"[OccasionManager] Trigger: {trigger['type']} - {trigger['content'][:50]}...")

        # ===== PHASE 1: PREHENSION =====
        print("[OccasionManager] Phase 1: Préhension...")
        prehension = self._prehend(trigger)
        profile_before = get_state_profile(prehension['previous_state_id'])

        # ===== PHASE 2: CONCRESCENCE =====
        print("[OccasionManager] Phase 2: Concrescence...")
        concrescence = self._concresce(trigger, prehension)

        # ===== PHASE 3: SATISFACTION =====
        print("[OccasionManager] Phase 3: Satisfaction...")
        satisfaction = self._satisfy(occasion_id, trigger, prehension, concrescence)

        # Profile after the transition
        profile_after = get_state_profile(satisfaction['new_state_id'])

        # Log the occasion (long texts are truncated to keep files small)
        processing_time = int((time.time() - start_time) * 1000)
        log_entry = OccasionLog(
            occasion_id=occasion_id,
            timestamp=datetime.now().isoformat(),
            trigger_type=trigger['type'],
            trigger_content=trigger['content'][:500],
            previous_state_id=prehension['previous_state_id'],
            prehended_thoughts_count=len(prehension['thoughts']),
            prehended_docs_count=len(prehension['documents']),
            prehended_thoughts=[t.get('content', '')[:100] for t in prehension['thoughts'][:5]],
            response_summary=concrescence['response'][:500],
            new_thoughts=concrescence['new_thoughts'],
            tools_used=concrescence['tools_used'],
            new_state_id=satisfaction['new_state_id'],
            alpha_used=satisfaction['alpha'],
            beta_used=satisfaction['beta'],
            profile_before=profile_before,
            profile_after=profile_after,
            processing_time_ms=processing_time
        )
        self.logger.log(log_entry)
        print(f"[OccasionManager] Occasion {occasion_id} terminée en {processing_time}ms")
        print(f"[OccasionManager] Nouvel état: S({satisfaction['new_state_id']})")
        return {
            'occasion_id': occasion_id,
            'response': concrescence['response'],
            'new_state_id': satisfaction['new_state_id'],
            'profile': profile_after,
            'processing_time_ms': processing_time
        }

    def _prehend(self, trigger: Dict[str, Any]) -> Dict[str, Any]:
        """
        Prehension phase — context retrieval.

        Retrieves:
        - The previous state (id + vector)
        - Relevant thoughts (semantic search)
        - Relevant documents (library search)
        """
        current_state_id = self.transformer.get_current_state_id()
        # Semantic search over past thoughts
        thoughts = self._search_thoughts(trigger['content'], limit=10)
        # Semantic search over the document library
        documents = self._search_library(trigger['content'], limit=5)
        return {
            'previous_state_id': current_state_id,
            'previous_state_vector': self.transformer.get_state_vector(current_state_id),
            'thoughts': thoughts,
            'documents': documents
        }

    def _search_thoughts(self, query: str, limit: int = 10) -> List[Dict]:
        """Semantic search in the Thought collection; [] on any failure."""
        # NOTE(review): the query is spliced into GraphQL with only
        # double quotes escaped — a query containing backslashes could
        # still break the string; consider a GraphQL-variables API.
        gql = {
            "query": """
            {
              Get {
                Thought(
                  nearText: {concepts: ["%s"]},
                  limit: %d
                ) {
                  content
                  timestamp
                  thought_type
                }
              }
            }
            """ % (query.replace('"', '\\"'), limit)
        }
        try:
            # ROBUSTNESS FIX: timeout so an unreachable Weaviate cannot hang us
            response = requests.post(
                f"{WEAVIATE_URL}/v1/graphql",
                json=gql,
                headers={"Content-Type": "application/json"},
                timeout=10
            )
            if response.status_code == 200:
                data = response.json()
                return data.get("data", {}).get("Get", {}).get("Thought", []) or []
        except Exception as e:
            # Best-effort: prehension degrades gracefully without thoughts
            print(f"[OccasionManager] Erreur recherche pensées: {e}")
        return []

    def _search_library(self, query: str, limit: int = 5) -> List[Dict]:
        """Semantic search in the library (Chunk collection); [] on any failure."""
        gql = {
            "query": """
            {
              Get {
                Chunk(
                  nearText: {concepts: ["%s"]},
                  limit: %d
                ) {
                  content
                  source
                  chunk_type
                }
              }
            }
            """ % (query.replace('"', '\\"'), limit)
        }
        try:
            # ROBUSTNESS FIX: timeout so an unreachable Weaviate cannot hang us
            response = requests.post(
                f"{WEAVIATE_URL}/v1/graphql",
                json=gql,
                headers={"Content-Type": "application/json"},
                timeout=10
            )
            if response.status_code == 200:
                data = response.json()
                return data.get("data", {}).get("Get", {}).get("Chunk", []) or []
        except Exception as e:
            # Best-effort: prehension degrades gracefully without documents
            print(f"[OccasionManager] Erreur recherche bibliothèque: {e}")
        return []

    def _concresce(
        self,
        trigger: Dict[str, Any],
        prehension: Dict[str, Any]
    ) -> Dict[str, Any]:
        """
        Concrescence phase — response generation.

        NOTE: this version SIMULATES concrescence; integration with the
        Claude Code SDK is planned for Phase 6.
        """
        # Simulation — to be replaced by the SDK in Phase 6
        context_summary = f"État S({prehension['previous_state_id']}), "
        context_summary += f"{len(prehension['thoughts'])} pensées, "
        context_summary += f"{len(prehension['documents'])} documents"
        response = f"[Simulation] Réponse à: {trigger['content'][:100]}\n"
        response += f"Contexte: {context_summary}"
        return {
            'response': response,
            'new_thoughts': [],  # no thoughts while simulating
            'tools_used': ['search_thoughts', 'search_library'],  # simulated
            'state_delta': {}
        }

    def _satisfy(
        self,
        occasion_id: int,
        trigger: Dict[str, Any],
        prehension: Dict[str, Any],
        concrescence: Dict[str, Any]
    ) -> Dict[str, Any]:
        """
        Satisfaction phase — creation of the new state.

        Persists the new thoughts, creates the new state, and reports
        the alpha/beta parameters that were applied.
        """
        # Persist the new thoughts
        for thought_content in concrescence['new_thoughts']:
            self._add_thought(thought_content, occasion_id)
        # Create the new state from the occasion
        new_state_id = self.transformer.create_new_state(
            occasion={
                'trigger_type': trigger['type'],
                'trigger_content': trigger['content'],
                'summary': concrescence['response'][:200]
            },
            response_text=concrescence['response'],
            thoughts_created=len(concrescence['new_thoughts'])
        )
        # Recompute the parameters for reporting (same inputs as
        # create_new_state, so the values match what was applied)
        alpha, beta = compute_adaptive_params({
            'thoughts_created': len(concrescence['new_thoughts']),
            'trigger_type': trigger['type'],
            'trigger_content': trigger['content']
        })
        return {
            'new_state_id': new_state_id,
            'alpha': alpha,
            'beta': beta,
            'thoughts_persisted': len(concrescence['new_thoughts'])
        }

    def _add_thought(self, content: str, occasion_id: int):
        """Insert a new Thought object (with its embedding) into Weaviate."""
        thought = {
            "content": content,
            "timestamp": datetime.now().isoformat() + "Z",
            "occasion_id": occasion_id,
            "thought_type": "reflection"
        }
        # Compute and normalize the embedding
        embedding = self.transformer.model.encode(content)
        embedding = embedding / np.linalg.norm(embedding)
        # ROBUSTNESS FIX: timeout so an unreachable Weaviate cannot hang us
        response = requests.post(
            f"{WEAVIATE_URL}/v1/objects",
            json={
                "class": "Thought",
                "properties": thought,
                "vector": embedding.tolist()
            },
            headers={"Content-Type": "application/json"},
            timeout=10
        )
        if response.status_code in [200, 201]:
            print(f"[OccasionManager] Pensée ajoutée: {content[:50]}...")
        else:
            print(f"[OccasionManager] Erreur ajout pensée: {response.status_code}")
# Manual smoke test: runs one full occasion cycle (requires a live Weaviate).
if __name__ == "__main__":
    manager = OccasionManager(log_dir="tests/temp_logs")
    result = manager.run_occasion({
        "type": "user",
        "content": "Bonjour Ikario, parle-moi de ta vision processuelle selon Whitehead.",
        "metadata": {}
    })
    print(f"\nRésultat:")
    print(f"  Occasion ID: {result['occasion_id']}")
    print(f"  Nouvel état: S({result['new_state_id']})")
    print(f"  Temps: {result['processing_time_ms']}ms")
    print(f"  Réponse: {result['response'][:100]}...")

View File

@@ -0,0 +1,258 @@
#!/usr/bin/env python3
"""
StateTransformation - Fonction de transition d'état S(t-1) → S(t).
Implémente la transformation processuelle selon Whitehead:
- Préhension: récupérer le contexte
- Concrescence: intégrer l'occasion
- Satisfaction: nouvel état stable
La formule de base:
S(t) = normalize(alpha * S(t-1) + beta * occasion_embedding)
où alpha (inertie) + beta (nouveauté) = 1
"""
import os
from datetime import datetime
from typing import Tuple, Dict, Any, Optional
import numpy as np
import requests
WEAVIATE_URL = os.getenv("WEAVIATE_URL", "http://localhost:8080")
def transform_state(
    s_prev: np.ndarray,
    occasion_embedding: np.ndarray,
    alpha: float = 0.85,
    beta: float = 0.15
) -> np.ndarray:
    """
    Compute S(t) from S(t-1) and the occasion embedding.

    The new state is the weighted blend ``alpha * S(t-1) + beta * occasion``,
    renormalized back onto the unit hypersphere. A zero-norm blend is
    returned unchanged rather than divided by zero.

    Args:
        s_prev: Previous state vector (normalized)
        occasion_embedding: Embedding of the occasion/response (normalized)
        alpha: Inertia coefficient (identity conservation)
        beta: Novelty coefficient (occasion integration)

    Returns:
        New normalized state vector
    """
    blended = alpha * s_prev + beta * occasion_embedding
    magnitude = np.linalg.norm(blended)
    return blended / magnitude if magnitude > 0 else blended
def compute_adaptive_params(
    occasion: Dict[str, Any],
    base_alpha: float = 0.85,
    base_beta: float = 0.15
) -> Tuple[float, float]:
    """
    Derive adaptive alpha/beta from the occasion's intensity.

    Heuristics:
    - More thoughts created → larger beta (more change), capped at +0.10
    - "timer" triggers → half the shift (gentle self-reflection)
    - Long "user" triggers (> 200 chars) → extra +0.02 shift

    Args:
        occasion: Dict with trigger_type, thoughts_created, trigger_content, ...
        base_alpha: Baseline alpha
        base_beta: Baseline beta

    Returns:
        (alpha, beta) pair, renormalized so it always sums to 1
    """
    # Novelty pressure grows with the number of thoughts created
    novelty = min(occasion.get('thoughts_created', 0) * 0.03, 0.10)

    trigger = occasion.get('trigger_type')
    if trigger == 'timer':
        # Timer occasions are soft self-reflection: halve the shift
        novelty = novelty * 0.5
    if trigger == 'user' and len(occasion.get('trigger_content', '')) > 200:
        # A substantial user message leaves a slightly larger imprint
        novelty = novelty + 0.02

    raw_alpha = base_alpha - novelty
    raw_beta = base_beta + novelty
    # Guarantee alpha + beta == 1 regardless of the base values supplied
    scale = raw_alpha + raw_beta
    return raw_alpha / scale, raw_beta / scale
class StateTransformer:
    """Manages state transformations and their persistence in Weaviate."""

    def __init__(self, embedding_model=None):
        """
        Args:
            embedding_model: SentenceTransformer model (lazily loaded on
                first use when None)
        """
        self._model = embedding_model
        self._model_loaded = embedding_model is not None

    @property
    def model(self):
        """Return the embedding model, loading it on first access."""
        if not self._model_loaded:
            # Imported lazily: sentence_transformers is heavy and not
            # needed by callers that never encode text
            from sentence_transformers import SentenceTransformer
            print("[StateTransformer] Chargement du modèle BGE-M3...")
            self._model = SentenceTransformer('BAAI/bge-m3')
            self._model_loaded = True
        return self._model

    def get_current_state_id(self) -> int:
        """Return the ID of the most recent state (-1 when unavailable).

        NOTE(review): only the first 100 objects are fetched; if the
        collection ever exceeds 100 states, the true max could be missed
        — consider pagination or an aggregate query.
        """
        url = f"{WEAVIATE_URL}/v1/objects?class=StateVector&limit=100"
        # ROBUSTNESS FIX: timeout so an unreachable Weaviate cannot hang us
        response = requests.get(url, timeout=10)
        if response.status_code != 200:
            return -1
        objects = response.json().get("objects", [])
        if not objects:
            return -1
        return max(obj.get("properties", {}).get("state_id", -1) for obj in objects)

    def get_state_vector(self, state_id: int) -> Optional[np.ndarray]:
        """Fetch the vector of one state; None when absent or on error."""
        query = {
            "query": """
            {
              Get {
                StateVector(where: {
                  path: ["state_id"],
                  operator: Equal,
                  valueInt: %d
                }) {
                  _additional {
                    vector
                  }
                }
              }
            }
            """ % state_id
        }
        # ROBUSTNESS FIX: timeout so an unreachable Weaviate cannot hang us
        response = requests.post(
            f"{WEAVIATE_URL}/v1/graphql",
            json=query,
            headers={"Content-Type": "application/json"},
            timeout=10
        )
        if response.status_code != 200:
            return None
        data = response.json()
        states = data.get("data", {}).get("Get", {}).get("StateVector", [])
        if not states:
            return None
        vector = states[0].get("_additional", {}).get("vector")
        return np.array(vector) if vector else None

    def create_new_state(
        self,
        occasion: Dict[str, Any],
        response_text: str,
        thoughts_created: int = 0
    ) -> int:
        """
        Create and persist a new state from the occasion.

        Args:
            occasion: {trigger_type, trigger_content, summary}
            response_text: Text of the generated response
            thoughts_created: Number of thoughts created

        Returns:
            new_state_id

        Raises:
            ValueError: when the current state cannot be fetched
            RuntimeError: when Weaviate rejects the new state object
        """
        # 1. Fetch S(t-1)
        current_id = self.get_current_state_id()
        s_prev = self.get_state_vector(current_id)
        if s_prev is None:
            raise ValueError(f"État S({current_id}) non trouvé")
        # 2. Embed and normalize the response
        occasion_embedding = self.model.encode(response_text)
        occasion_embedding = occasion_embedding / np.linalg.norm(occasion_embedding)
        # 3. Derive adaptive alpha/beta from the occasion's intensity
        alpha, beta = compute_adaptive_params({
            'thoughts_created': thoughts_created,
            'trigger_type': occasion.get('trigger_type', 'user'),
            'trigger_content': occasion.get('trigger_content', '')
        })
        # 4. Apply the transition S(t-1) → S(t)
        s_new = transform_state(s_prev, occasion_embedding, alpha, beta)
        # 5. Persist (long texts truncated to keep objects small)
        new_state_id = current_id + 1
        state_obj = {
            "state_id": new_state_id,
            "timestamp": datetime.now().isoformat() + "Z",
            "previous_state_id": current_id,
            "trigger_type": occasion.get('trigger_type', 'user'),
            "trigger_content": occasion.get('trigger_content', '')[:500],
            "occasion_summary": occasion.get('summary', '')[:500],
            "response_summary": response_text[:500],
            "thoughts_created": thoughts_created,
            "source_thoughts_count": 0,
            "source_messages_count": 0,
        }
        # ROBUSTNESS FIX: timeout so an unreachable Weaviate cannot hang us
        response = requests.post(
            f"{WEAVIATE_URL}/v1/objects",
            json={
                "class": "StateVector",
                "properties": state_obj,
                "vector": s_new.tolist()
            },
            headers={"Content-Type": "application/json"},
            timeout=10
        )
        if response.status_code not in [200, 201]:
            raise RuntimeError(f"Erreur création S({new_state_id}): {response.text}")
        print(f"[StateTransformer] État S({new_state_id}) créé (alpha={alpha:.2f}, beta={beta:.2f})")
        return new_state_id
# Quick manual check of the transformation on random unit vectors.
if __name__ == "__main__":
    # Build two random normalized vectors and transform one by the other
    s_prev = np.random.randn(1024)
    s_prev = s_prev / np.linalg.norm(s_prev)
    occasion = np.random.randn(1024)
    occasion = occasion / np.linalg.norm(occasion)
    s_new = transform_state(s_prev, occasion, alpha=0.85, beta=0.15)
    # Both norms should print as 1.0000; similarity should be high (~0.99)
    print(f"Norme s_prev: {np.linalg.norm(s_prev):.4f}")
    print(f"Norme s_new: {np.linalg.norm(s_new):.4f}")
    print(f"Similarité s_prev/s_new: {np.dot(s_prev, s_new):.4f}")

View File

@@ -0,0 +1,140 @@
#!/usr/bin/env python3
"""Tests pour Phase 3 - Transformation d'état."""
import numpy as np
import pytest
from ..state_transformation import (
transform_state,
compute_adaptive_params,
StateTransformer
)
class TestTransformState:
    """Unit tests for the transform_state() transition function."""

    def test_transform_preserves_norm(self):
        """The transformed vector must remain on the unit hypersphere."""
        state = np.random.randn(1024)
        state = state / np.linalg.norm(state)
        event = np.random.randn(1024)
        event = event / np.linalg.norm(event)

        result = transform_state(state, event)

        assert abs(np.linalg.norm(result) - 1.0) < 0.001

    def test_high_alpha_preserves_identity(self):
        """High alpha = barely any change."""
        state = np.random.randn(1024)
        state = state / np.linalg.norm(state)
        event = np.random.randn(1024)
        event = event / np.linalg.norm(event)

        result = transform_state(state, event, alpha=0.99, beta=0.01)
        closeness = np.dot(state, result)

        assert closeness > 0.98, f"Trop de changement: similarity={closeness}"

    def test_low_alpha_allows_change(self):
        """Low alpha = substantial change."""
        state = np.random.randn(1024)
        state = state / np.linalg.norm(state)
        # Build an occasion pointing nearly opposite to the state
        event = -state + 0.1 * np.random.randn(1024)
        event = event / np.linalg.norm(event)

        result = transform_state(state, event, alpha=0.5, beta=0.5)
        closeness = np.dot(state, result)

        assert closeness < 0.9, f"Pas assez de changement: similarity={closeness}"

    def test_identical_occasion_increases_identity(self):
        """An occasion identical to the state reinforces the identity."""
        state = np.random.randn(1024)
        state = state / np.linalg.norm(state)

        result = transform_state(state, state.copy(), alpha=0.85, beta=0.15)

        # Must remain essentially the same vector
        assert np.dot(state, result) > 0.99
class TestAdaptiveParams:
    """Unit tests for the adaptive alpha/beta computation."""

    def test_default_params(self):
        """An empty occasion yields the baseline parameters."""
        alpha, beta = compute_adaptive_params({})

        assert abs(alpha + beta - 1.0) < 0.001
        assert 0.8 < alpha < 0.9
        assert 0.1 < beta < 0.2

    def test_more_thoughts_increases_beta(self):
        """Creating thoughts shifts weight from alpha toward beta."""
        quiet = compute_adaptive_params({'thoughts_created': 0})
        busy = compute_adaptive_params({'thoughts_created': 5})

        assert busy[1] > quiet[1]  # beta grows
        assert busy[0] < quiet[0]  # alpha shrinks

    def test_timer_reduces_intensity(self):
        """Timer triggers dampen the shift relative to user triggers."""
        from_user = compute_adaptive_params({
            'trigger_type': 'user',
            'thoughts_created': 3
        })
        from_timer = compute_adaptive_params({
            'trigger_type': 'timer',
            'thoughts_created': 3
        })

        assert from_timer[1] < from_user[1]
        assert from_timer[0] > from_user[0]

    def test_params_sum_to_one(self):
        """Whatever the occasion, alpha + beta == 1."""
        scenarios = [
            {'thoughts_created': 0},
            {'thoughts_created': 10},
            {'trigger_type': 'timer'},
            {'trigger_type': 'user', 'trigger_content': 'x' * 300},
        ]
        for scenario in scenarios:
            alpha, beta = compute_adaptive_params(scenario)
            assert abs(alpha + beta - 1.0) < 0.001, f"Cas: {scenario}"
class TestStateTransformer:
    """Tests for StateTransformer (requires a running Weaviate)."""

    @pytest.fixture
    def transformer(self):
        """Create a transformer without an embedding model (unit tests only)."""
        return StateTransformer(embedding_model=None)

    def test_get_current_state_id(self, transformer):
        """Fetching the current state id returns a sane integer."""
        # This test needs Weaviate; without it the method returns -1
        state_id = transformer.get_current_state_id()
        assert isinstance(state_id, int)
        # -1 when no state exists, otherwise >= 0
        assert state_id >= -1

    @pytest.mark.skip(reason="Nécessite Weaviate avec S(0)")
    def test_get_state_vector(self, transformer):
        """Fetching a state vector yields a 1024-dim unit vector."""
        vector = transformer.get_state_vector(0)
        if vector is not None:
            assert len(vector) == 1024
            assert abs(np.linalg.norm(vector) - 1.0) < 0.01
# Allow running this test module directly, outside a pytest invocation.
if __name__ == "__main__":
    pytest.main([__file__, "-v"])

View File

@@ -0,0 +1,215 @@
#!/usr/bin/env python3
"""Tests pour Phase 4 - Logging des occasions."""
import tempfile
from datetime import datetime
from pathlib import Path
import pytest
from ..occasion_logger import OccasionLogger, OccasionLog
class TestOccasionLog:
    """Tests for the OccasionLog dataclass structure."""

    def test_create_occasion_log(self):
        """Construct a basic OccasionLog and read back its fields."""
        log = OccasionLog(
            occasion_id=1,
            timestamp=datetime.now().isoformat(),
            trigger_type="user",
            trigger_content="Test",
            previous_state_id=0,
            prehended_thoughts_count=5,
            prehended_docs_count=2,
            response_summary="Response",
            new_state_id=1,
            alpha_used=0.85,
            beta_used=0.15,
            processing_time_ms=1000
        )
        assert log.occasion_id == 1
        assert log.trigger_type == "user"
        assert log.alpha_used == 0.85

    def test_default_lists(self):
        """List fields left unset must default to independent empty lists."""
        log = OccasionLog(
            occasion_id=1,
            timestamp=datetime.now().isoformat(),
            trigger_type="user",
            trigger_content="Test",
            previous_state_id=0,
            prehended_thoughts_count=0,
            prehended_docs_count=0,
            response_summary="Response",
            new_state_id=1,
            alpha_used=0.85,
            beta_used=0.15
        )
        assert log.new_thoughts == []
        assert log.tools_used == []
        assert log.prehended_thoughts == []
class TestOccasionLogger:
"""Tests du logger d'occasions."""
@pytest.fixture
def temp_logger(self):
"""Créer un logger avec répertoire temporaire."""
with tempfile.TemporaryDirectory() as tmpdir:
yield OccasionLogger(tmpdir)
def test_log_and_retrieve(self, temp_logger):
"""Logger et relire une occasion."""
occasion = OccasionLog(
occasion_id=42,
timestamp=datetime.now().isoformat(),
trigger_type="user",
trigger_content="Question test",
previous_state_id=5,
prehended_thoughts_count=3,
prehended_docs_count=1,
response_summary="Réponse test",
new_thoughts=["Nouvelle pensée"],
tools_used=["search_thoughts"],
new_state_id=6,
alpha_used=0.82,
beta_used=0.18,
processing_time_ms=2500
)
# Logger
filepath = temp_logger.log(occasion)
assert filepath.exists()
# Relire
loaded = temp_logger.get_occasion(42)
assert loaded is not None
assert loaded.occasion_id == 42
assert loaded.trigger_content == "Question test"
assert loaded.new_thoughts == ["Nouvelle pensée"]
def test_get_nonexistent(self, temp_logger):
"""Récupérer une occasion inexistante retourne None."""
loaded = temp_logger.get_occasion(99999)
assert loaded is None
def test_get_recent_occasions(self, temp_logger):
"""Récupérer les occasions récentes."""
# Créer plusieurs occasions
for i in range(5):
occasion = OccasionLog(
occasion_id=i,
timestamp=datetime.now().isoformat(),
trigger_type="user",
trigger_content=f"Question {i}",
previous_state_id=max(0, i - 1),
prehended_thoughts_count=i,
prehended_docs_count=1,
response_summary=f"Réponse {i}",
new_state_id=i,
alpha_used=0.85,
beta_used=0.15,
processing_time_ms=1000 + i * 100
)
temp_logger.log(occasion)
# Récupérer les 3 dernières
recent = temp_logger.get_recent_occasions(3)
assert len(recent) == 3
# Vérifier l'ordre (plus récent d'abord)
assert recent[0].occasion_id == 4
assert recent[1].occasion_id == 3
assert recent[2].occasion_id == 2
def test_get_last_occasion_id(self, temp_logger):
"""Récupérer l'ID de la dernière occasion."""
# Vide
assert temp_logger.get_last_occasion_id() == -1
# Ajouter une occasion
occasion = OccasionLog(
occasion_id=10,
timestamp=datetime.now().isoformat(),
trigger_type="user",
trigger_content="Test",
previous_state_id=0,
prehended_thoughts_count=0,
prehended_docs_count=0,
response_summary="Response",
new_state_id=1,
alpha_used=0.85,
beta_used=0.15
)
temp_logger.log(occasion)
assert temp_logger.get_last_occasion_id() == 10
def test_profile_evolution(self, temp_logger):
    """A single profile component can be traced across occasions."""
    # Log occasions whose curiosity rises by 0.05 at each step.
    for step in range(5):
        before = 0.5 + step * 0.05
        after = 0.5 + (step + 1) * 0.05
        temp_logger.log(OccasionLog(
            occasion_id=step,
            timestamp=datetime.now().isoformat(),
            trigger_type="user",
            trigger_content=f"Question {step}",
            previous_state_id=max(0, step - 1),
            prehended_thoughts_count=0,
            prehended_docs_count=0,
            response_summary=f"Réponse {step}",
            new_state_id=step,
            alpha_used=0.85,
            beta_used=0.15,
            profile_before={"epistemic": {"curiosity": before}},
            profile_after={"epistemic": {"curiosity": after}},
        ))
    trail = temp_logger.get_profile_evolution("curiosity", last_n=5)
    assert len(trail) == 5
    # The curiosity series must end higher than it started.
    series = [value for _, value in trail]
    assert series[-1] > series[0]
def test_statistics(self, temp_logger):
    """Aggregate statistics are computed over the logged occasions."""
    # Three occasions: ids 0 and 2 are "user" triggers, id 1 is "timer".
    for num in range(3):
        kind = "user" if num % 2 == 0 else "timer"
        temp_logger.log(OccasionLog(
            occasion_id=num,
            timestamp=datetime.now().isoformat(),
            trigger_type=kind,
            trigger_content=f"Question {num}",
            previous_state_id=max(0, num - 1),
            prehended_thoughts_count=0,
            prehended_docs_count=0,
            response_summary=f"Réponse {num}",
            new_thoughts=["t1"] if num == 0 else [],
            tools_used=["tool1", "tool2"],
            new_state_id=num,
            alpha_used=0.85,
            beta_used=0.15,
            processing_time_ms=1000 + num * 500,
        ))
    stats = temp_logger.get_statistics()
    assert stats["count"] == 3
    # Every aggregate section must be present.
    for section in ("processing_time", "thoughts_created", "trigger_distribution"):
        assert section in stats
    # Trigger counts follow the alternation above.
    assert stats["trigger_distribution"]["user"] == 2
    assert stats["trigger_distribution"]["timer"] == 1
# Allow running this test module directly (outside a pytest invocation).
if __name__ == "__main__":
    pytest.main([__file__, "-v"])

View File

@@ -0,0 +1,167 @@
#!/usr/bin/env python3
"""Tests pour Phase 5 - OccasionManager."""
import tempfile
import pytest
from ..occasion_manager import OccasionManager, get_state_profile
class TestGetStateProfile:
    """Tests for the get_state_profile helper."""

    @pytest.mark.skip(reason="Nécessite Weaviate avec S(0) et directions")
    def test_get_profile_s0(self):
        """S(0)'s profile is a non-empty dict of values bounded in [-1, 1]."""
        profile = get_state_profile(0)
        assert isinstance(profile, dict)
        # At least one category must be present.
        assert len(profile) > 0
        # Every component value must lie within [-1, 1].
        for category, components in profile.items():
            for name, value in components.items():
                assert -1 <= value <= 1, f"{name} = {value} hors limites"
class TestOccasionManager:
    """Tests for the OccasionManager orchestrator."""

    @pytest.fixture
    def manager(self):
        """Yield a manager whose logs live in a throwaway directory."""
        with tempfile.TemporaryDirectory() as tmpdir:
            yield OccasionManager(log_dir=tmpdir, embedding_model=None)

    def test_manager_initialization(self, manager):
        """A fresh manager exposes its collaborators and a valid id."""
        assert manager.current_occasion_id >= 0
        assert manager.logger is not None
        assert manager.transformer is not None

    def test_prehend_structure(self, manager):
        """Prehension yields the expected keys with list-typed fields."""
        stimulus = {"type": "user", "content": "Test question", "metadata": {}}
        gathered = manager._prehend(stimulus)
        for key in ("previous_state_id", "thoughts", "documents"):
            assert key in gathered
        assert isinstance(gathered["thoughts"], list)
        assert isinstance(gathered["documents"], list)

    def test_concresce_simulation(self, manager):
        """Simulated concrescence returns a response plus bookkeeping."""
        stimulus = {
            "type": "user",
            "content": "Test question sur Whitehead",
            "metadata": {},
        }
        gathered = {
            "previous_state_id": 0,
            "previous_state_vector": None,
            "thoughts": [{"content": "Pensée 1"}],
            "documents": [],
        }
        outcome = manager._concresce(stimulus, gathered)
        for key in ("response", "new_thoughts", "tools_used"):
            assert key in outcome
        assert "[Simulation]" in outcome["response"]

    @pytest.mark.skip(reason="Nécessite Weaviate avec S(0)")
    def test_run_occasion_full(self, manager):
        """A complete occasion cycle returns every expected result field."""
        stimulus = {
            "type": "user",
            "content": "Qu'est-ce que le processus selon Whitehead ?",
            "metadata": {},
        }
        outcome = manager.run_occasion(stimulus)
        expected_keys = (
            "occasion_id",
            "response",
            "new_state_id",
            "profile",
            "processing_time_ms",
        )
        for key in expected_keys:
            assert key in outcome
        assert outcome["processing_time_ms"] > 0

    @pytest.mark.skip(reason="Nécessite Weaviate avec S(0)")
    def test_state_evolution_after_occasion(self, manager):
        """Running an occasion advances the state id by exactly one."""
        state_before = manager.transformer.get_current_state_id()
        stimulus = {
            "type": "user",
            "content": "Je suis très curieux à propos de la philosophie",
            "metadata": {},
        }
        outcome = manager.run_occasion(stimulus)
        assert outcome["new_state_id"] == state_before + 1

    @pytest.mark.skip(reason="Nécessite Weaviate")
    def test_occasion_logged(self, manager):
        """Each completed occasion is persisted through the logger."""
        stimulus = {"type": "user", "content": "Test logging", "metadata": {}}
        outcome = manager.run_occasion(stimulus)
        stored = manager.logger.get_occasion(outcome["occasion_id"])
        assert stored is not None
        assert stored.trigger_content == "Test logging"
class TestOccasionManagerTriggerTypes:
    """Exercise the supported trigger kinds through the simulated cycle."""

    @pytest.fixture
    def manager(self):
        """Yield a manager backed by a temporary log directory."""
        with tempfile.TemporaryDirectory() as tmpdir:
            yield OccasionManager(log_dir=tmpdir, embedding_model=None)

    def test_user_trigger(self, manager):
        """A user trigger's content is echoed in the simulated response."""
        stimulus = {
            "type": "user",
            "content": "Question utilisateur",
            "metadata": {},
        }
        outcome = manager._concresce(stimulus, manager._prehend(stimulus))
        assert "Question utilisateur" in outcome["response"]

    def test_timer_trigger(self, manager):
        """A timer trigger (self-reflection) still produces a response."""
        stimulus = {
            "type": "timer",
            "content": "Moment d'auto-réflexion",
            "metadata": {"auto": True},
        }
        outcome = manager._concresce(stimulus, manager._prehend(stimulus))
        assert outcome["response"] is not None
# Allow running this test module directly (outside a pytest invocation).
if __name__ == "__main__":
    pytest.main([__file__, "-v"])