# generate_docs.py
  1. # $Id$
  2. # $Author$
  3. # $log$
import os

# Output directory for the generated documentation mirror.
docs_dir = "docs"
# Create it up front; exist_ok=True makes re-running the script idempotent.
os.makedirs(docs_dir, exist_ok=True)
# Mapping of output filename -> full Markdown body written verbatim to docs/.
# Each body opens with a "$Id$" line — presumably expanded by Git keyword
# substitution via the project's .gitattributes (see Final_Report.md item 5);
# confirm against the repository configuration.
docs = {
"Final_Report.md": """# $Id$
# Final Project Report (Living Document)
## What Has Been Done
1. **Core Architecture**: Deployed a resilient 4-container Docker Compose stack (MySQL, Nginx, Streamlit UI, Ollama Inference).
2. **Database Optimization**: Successfully loaded 4.4M+ OpenFoodFacts records and utilized advanced vertical partitioning and FULLTEXT indices.
3. **Clinical Subquery Strategy**: Refactored the core Pandas/SQL query pipeline to use subquery limiting, resolving Cartesian join explosions and reducing query latency to ~0.04s.
4. **Monitoring & Security**: Nginx securely proxies traffic on Port 80. Zabbix actively monitors the proxy and server health, dynamically reporting alerts to Microsoft Teams.
5. **Git Versioning**: Implemented Git `.gitattributes` to push `$Id$` tracking directly into the Python Application UI.
## What Needs To Be Done (Day 2 Operations)
1. **SSL/TLS Certificates**: The Nginx proxy is functional on HTTP port 80. Port 443 (HTTPS) must be configured with a Let's Encrypt certificate for true production encryption.
2. **User Acceptance Testing (UAT)**: Clinical dietitians should rigorously test the AI Chat constraints and Plate Builder to ensure edge cases are handled safely.
3. **Advanced Rate Limiting**: Limit the number of AI requests per user using a sliding window algorithm in `app.py`.
## What Is The Next Step
- Execute the `data_sync.sh` cron job monthly.
- Maintain the automated `backup_db.sh` 7-day retention cycle.
- Begin the hand-off to the operational team for Phase 2 feature requests.
""",
"Backup_Procedure.md": """# $Id$
# Database Backup Procedure
## Automated Backups
The system utilizes a cron job pointing to `backup_db.sh`.
- The script executes `mysqldump` directly inside the MySQL container.
- Outputs are piped to `gzip` and stored in `/backups`.
- A 7-day retention policy automatically purges old backups using `find ... -mtime +7 -exec rm`.
## Manual Restore
To manually restore a backup:
`gunzip < backups/food_db_20260507_0200.sql.gz | docker exec -i food_project-mysql-1 mysql -u root -proot_pass food_db`
""",
"Data_Ingestion.md": """# $Id$
# Data Ingestion Pipeline
## Overview
The application utilizes `data_sync.sh` to update the OpenFoodFacts dataset.
## Online Mode
Run `bash data_sync.sh --online`. The script will download the latest CSV directly from the official servers and trigger the ingestion pipeline.
## Offline Mode
Drop a `en.openfoodfacts.org.products.csv` file into the `/data` folder and run `bash data_sync.sh`. The script detects the file and triggers the Docker ingestion container.
""",
"Installation_Guide.md": """# $Id$
# Installation Guide
## Requirements
- Ubuntu 24.04 LTS (or WSL2)
- Docker & Docker Compose
- 16GB RAM Minimum
## Deployment Steps
1. `git clone https://git.btshub.lu/lanfr/LocalFoodAI_lanfr144.git`
2. `cd LocalFoodAI_lanfr144`
3. `chmod +x data_sync.sh backup_db.sh`
4. `docker-compose up -d --build`
5. Navigate to `http://<server-ip>`
""",
"User_Guide.md": """# $Id$
# User Guide
## 1. Clinical Data Search
Search for products using keywords. The system utilizes FULLTEXT matching to instantly return the top 10 relevant matches alongside macronutrient data.
## 2. My Plate Builder
Add portion sizes of different foods to calculate cumulative nutritional intake. Use the 🗑️ icon to remove items.
## 3. Chat with AI
Ask the `llama3.1` model complex dietary questions. It natively utilizes RAG Tool Calling to silently search the database and formulate clinical answers.
""",
"Wiki_Home.md": """# $Id$
# Documentation Home
Welcome to the static documentation mirror. Please navigate the markdown files in this directory for architectural diagrams and guides.
""",
"Scrum_Wiki.md": """# $Id$
# Scrum Wiki Master List
This file aggregates references to the Scrum daily logs, plans, and retrospectives.
""",
"Scrum_Daily.md": """# $Id$
# Daily Scrums
- **26.05.07 DAILY**: Fixed time scope bug, added Nginx proxy, built sync scripts.
""",
"Scrum_Plan.md": """# $Id$
# Sprint Plans
- **Sprint 10 PLAN**: Fix LLM Tool Calling, optimize Cartesian SQL explosion, build Teams webhooks.
""",
"Scrum_Retro.md": """# $Id$
# Sprint Retrospectives
- **Sprint 10 RETROSPECTIVE**: Mitigated dirty data duplicates using SQL `GROUP BY`. Need to maintain strict Git commit tagging (`TG-XXX`).
""",
"Scrum_Review.md": """# $Id$
# Sprint Reviews
- **Sprint 10 REVIEW**: App executes sub-second searches. Nginx fully operational on Port 80.
""",
"Scrum_Artifacts.md": """# $Id$
# Scrum Artifacts
Contains User Stories, velocity tracking, and burndown charts from Taiga.
""",
"Test_Cases_Sprint8.md": """# $Id$
# Sprint 8 Legacy Test Cases
- Tested RAG AI tool integration.
- Tested user authentication flows.
""",
"WSL_Deployment.md": """# $Id$
# WSL Deployment Runbook
To deploy on Windows Subsystem for Linux:
1. Ensure WSL2 backend is enabled in Docker Desktop.
2. Follow standard Installation Guide inside the WSL Ubuntu terminal.
"""
}
  107. for filename, content in docs.items():
  108. filepath = os.path.join(docs_dir, filename)
  109. with open(filepath, "w", encoding="utf-8") as f:
  110. f.write(content)
  111. print(f"Generated {filepath}")
  112. print("\nDocs directory perfectly mirrored.")