# Example environment variables for Cognio
# Copy this to .env and customize as needed

# Database
DB_PATH=./data/memory.db

# Embeddings
# Recommended models (from fastest/lightest to most accurate):
# - all-MiniLM-L6-v2 (384-dim, FAST - good for small/medium datasets)
# - paraphrase-MiniLM-L6-v2 (384-dim, better paraphrase detection)
# - paraphrase-multilingual-MiniLM-L12-v2 (384-dim, multilingual)
# - paraphrase-multilingual-mpnet-base-v2 (768-dim, multilingual, higher quality but slower)
EMBED_MODEL=all-MiniLM-L6-v2
EMBED_DEVICE=cpu
EMBEDDING_CACHE_PATH=./data/embedding_cache.pkl

# API Server
API_HOST=0.0.0.0
API_PORT=8080
# API_KEY=your-secret-key-here # Uncomment to enable API key authentication

# Search
DEFAULT_SEARCH_LIMIT=5
SIMILARITY_THRESHOLD=0.4
HYBRID_ENABLED=true
HYBRID_ALPHA=0.6
HYBRID_MODE=rerank
HYBRID_RERANK_TOPK=100

# LEANN vector search (optional)
LEANN_ENABLED=false
LEANN_INDEX_PATH=./data/leann/memories.leann
LEANN_BACKEND=hnsw
LEANN_LAZY_BUILD=true
LEANN_RECOMPUTE_ON_SEARCH=true
LEANN_WARMUP_ON_START=false
LEANN_IDLE_BUILD=false
LEANN_IDLE_SECONDS=300
LEANN_IDLE_CHECK_INTERVAL=60

# Performance
MAX_TEXT_LENGTH=10000
BATCH_SIZE=32
SUMMARIZE_THRESHOLD=50

# Logging
LOG_LEVEL=info

# Auto-tagging with LLM
AUTOTAG_ENABLED=true
LLM_PROVIDER=groq

# Groq Settings (RECOMMENDED - Free tier: 14,400 requests/day)
# Get your API key from: https://console.groq.com/keys
GROQ_API_KEY=your-groq-api-key-here
# Recommended models (from cheapest to most powerful):
# - llama-3.1-8b-instant ($0.05/$0.08 per 1M tokens - FASTEST, cheapest)
# - gemma2-9b-it ($0.2/$0.2 per 1M tokens - balanced)
# - llama-4-scout-17b-16e-instruct ($0.11/$0.34 per 1M tokens - vision support)
# - openai/gpt-oss-20b ($0.1/$0.5 per 1M tokens - reasoning, prompt caching 50%)
# - openai/gpt-oss-120b ($0.15/$0.75 per 1M tokens - BEST quality, reasoning, caching 50%)
GROQ_MODEL=openai/gpt-oss-120b

# OpenAI Settings (alternative - more expensive but widely available)
# Get your API key from: https://platform.openai.com/api-keys
# OPENAI_API_KEY=your-openai-api-key-here
# OPENAI_MODEL=gpt-4o-mini

# Summarization
SUMMARIZATION_ENABLED=true
# Methods: extractive (clustering-based, no API calls) or abstractive (LLM-based, uses API)
SUMMARIZATION_METHOD=abstractive
SUMMARIZATION_EMBED_MODEL=all-MiniLM-L6-v2