# taiga_wiki_may07.py

# $Id$
# $Author$
# $Log$
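# (These keyword headers depend on Git's `ident` attribute, enabled with a
#  .gitattributes entry such as `*.py ident`. Note that Git only expands $Id$,
#  replacing it with the blob SHA-1 on checkout; $Author$ and $Log$ are
#  RCS/CVS-era keywords kept here as placeholders.)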
import requests
import urllib3

urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
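# The Taiga instance is reached over HTTPS with what appears to be a
# self-signed certificate, hence verify=False on every request and the
# suppression of urllib3's InsecureRequestWarning above.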
base_url = 'https://192.168.130.161/taiga/api/v1'

auth = requests.post(
    f'{base_url}/auth',
    json={'type': 'normal', 'username': 'FrancoisLange', 'password': 'BTSai123'},
    verify=False,
).json()
headers = {'Authorization': f'Bearer {auth["auth_token"]}', 'Content-Type': 'application/json'}
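# NOTE: the auth response is used unchecked; if the login fails there is no
# 'auth_token' key and the headers line above raises a KeyError. A minimal
# guard (sketch) would be:
#   if 'auth_token' not in auth:
#       raise SystemExit(f'Taiga auth failed: {auth}')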
proj_id = 21
wiki_content = {
    "260507-daily": {
        "content": "# 26.05.07 DAILY SCRUM\n\n## What did you do yesterday?\n- Configured the Nginx reverse proxy to run on port 80.\n- Wrote automated bash scripts (`data_sync.sh` and `backup_db.sh`) for data freshness and disaster recovery.\n\n## What will you do today?\n- Fix the `import time` scope error that crashed the UI timers.\n- Inject Git `$Id$` version tracking across the entire codebase using `.gitattributes`.\n- Push all the final Scrum documentation to the Taiga Wiki and sync it locally to `/docs`.\n\n## Are there any impediments?\n- Git keyword expansion requires adding a `.gitattributes` file and replacing `os.path.getmtime` with `$Id$` in the Streamlit UI."
    },
    "260507-review": {
        "content": "# 26.05.07 SPRINT REVIEW\n\n## Sprint 10 Goal\nOptimize performance to remove SQL query freezing, and securely monitor the final architecture.\n\n## Demonstration\n- **Subquery First Strategy:** The Streamlit app no longer freezes during Clinical Data Search or Plate Builder. Queries execute in ~0.040 seconds.\n- **Teams Integration:** The Zabbix Webhook successfully transmitted the `Hello World` test message to the Microsoft Teams channel.\n- **Nginx Proxy:** The application is now natively served via HTTP port 80, handling WebSocket Upgrades perfectly.\n\n## Feedback\n- The UI execution timers are a great touch, proving the backend optimizations were successful.\n- Project is stable and ready for final Git commit and documentation handoff."
    },
    "260507-retrospective": {
        "content": "# 26.05.07 SPRINT RETROSPECTIVE\n\n## What went well?\n- Identifying the Cartesian explosion in MySQL caused by duplicate `code` entries in the OpenFoodFacts datasets.\n- Utilizing standard Nginx configurations to correctly map Streamlit WebSockets securely.\n\n## What could be improved?\n- The initial implementation of `time.time()` was accidentally scoped inside an email function, causing a `NameError`. Better unit testing before pushing to production would catch these scope errors.\n- Git commit messages lacked Taiga `TG-XXX` tags, requiring a retroactive script to sync the Taiga board.\n\n## Action Items\n- Use `TG-XXX` in all future Git commit messages.\n- Ensure `import` statements are strictly maintained at the top of Python modules."
    },
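    # (Sketch of the "Subquery-First" pattern referenced above and in the
    #  review: bound the driving row set *before* the LEFT JOIN so duplicate
    #  `code` rows cannot multiply the result. Table and column names here are
    #  illustrative, not from the project:
    #    SELECT p.*, n.* FROM (SELECT * FROM products WHERE name LIKE %s LIMIT 50) AS p
    #    LEFT JOIN nutrients AS n ON n.code = p.code;
    #  )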
  21. "260507-plan": {
  22. "content": "# 26.05.07 SPRINT PLANNING (Day 2 Operations)\n\n## Goal\nTransition from Active Development to Day 2 Operations, focusing on infrastructure hardening and documentation.\n\n## Selected User Stories\n1. **Git Identity Keywords:** Inject `$Id$` headers and `.gitattributes` for native Git versioning.\n2. **Documentation Mirror:** Extract all Taiga Wiki Scrum pages and architectural documentation into a static `docs/` repository for Git syncing.\n3. **Final Report Generation:** Author a comprehensive report outlining what was accomplished and charting the course for future maintenance."
  23. },
  24. "devops-deploiement": {
  25. "content": "# DEVOPS & DÉPLOIEMENT\n\n## Docker Architecture\nThe project utilizes `docker-compose` to orchestrate 4 core containers:\n1. `app` (Streamlit Python UI)\n2. `mysql` (Database Backend)\n3. `nginx` (Reverse Proxy on Port 80 handling WebSockets)\n4. `ingest` (Ephemeral offline Data Ingestion Container)\n\n## Automated Cron Jobs (Day 2 Operations)\nTo ensure system stability over time, two Bash scripts must be configured in the host's `crontab`:\n\n### 1. Data Freshness (`data_sync.sh`)\nSyncs the OpenFoodFacts CSV files. Supports `--online` for `wget` scraping or offline mode for processing locally dropped files.\n\n### 2. Disaster Recovery (`backup_db.sh`)\nExecutes a `mysqldump` directly from the MySQL container, compressing the output to `gzip`. Enforces a strict 7-day retention policy to prevent storage exhaustion.\n\n## Git Versioning\nAll files utilize the `ident` property within `.gitattributes`, injecting real-time Git SHA-1 hashes into file `$Id$` variables for precise version tracking in production."
  26. },
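    # (Sketch: the two maintenance scripts described above would be registered
    #  in the host crontab along these lines; paths and schedules are
    #  illustrative, not from the project:
    #    0 2 * * *  /path/to/data_sync.sh >> /var/log/data_sync.log 2>&1
    #    30 2 * * * /path/to/backup_db.sh >> /var/log/backup_db.log 2>&1
    #  )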
  27. "architecture-technologies": {
  28. "content": "# ARCHITECTURE & TECHNOLOGIES\n\n## Frontend\n- **Streamlit (v1.30+)**: Handles all UI routing and data presentation asynchronously.\n\n## Backend Data\n- **MySQL 8.0**: Features robust horizontal table partitioning across the massive OpenFoodFacts dataset. Queries are heavily optimized using a \"Subquery-First\" limiting strategy to prevent Cartesian explosions during `LEFT JOIN` operations.\n\n## AI Inference Engine\n- **Ollama**: Hosted locally via Docker. Utilizes the **`llama3.1`** model exclusively, as the updated 3.1 architecture supports native API Tool Calling schemas (JSON output), which the Clinical RAG system relies heavily upon to search the MySQL database.\n\n## Monitoring & Alerting\n- **Zabbix**: Actively monitors Docker network health, SNMP traps, and Nginx reverse proxy HTTP codes.\n- **Microsoft Teams Integration**: Zabbix dynamically pushes critical alerts to a designated Microsoft Teams channel using a Python-configured Webhook MediaType."
  29. }
  30. }
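# Upsert each page: look the slug up first and update in place when it exists
# (Taiga requires the current `version` on edits for optimistic concurrency);
# otherwise fall through and create it.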
for slug, data in wiki_content.items():
    check_req = requests.get(f'{base_url}/wiki?project={proj_id}&slug={slug}', headers=headers, verify=False)
    if check_req.status_code == 200:
        wiki_pages = check_req.json()
        if len(wiki_pages) > 0:
            page_id = wiki_pages[0]['id']
            version = wiki_pages[0]['version']
            payload = {
                "project": proj_id,
                "slug": slug,
                "content": data["content"],
                "version": version
            }
            res = requests.put(f'{base_url}/wiki/{page_id}', json=payload, headers=headers, verify=False)
            if res.status_code == 200:
                print(f"Updated Wiki Page: {slug}")
            else:
                print(f"Failed to update {slug}: {res.text}")
            continue
    # If it doesn't exist, create it
    payload = {
        "project": proj_id,
        "slug": slug,
        "content": data["content"]
    }
    res = requests.post(f'{base_url}/wiki', json=payload, headers=headers, verify=False)
    if res.status_code == 201:
        print(f"Created Wiki Page: {slug}")
    else:
        print(f"Failed to create {slug}: {res.text}")
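
# A possible companion step for the "Documentation Mirror" user story noted in
# the wiki content above: mirror the same pages into a local docs/ tree so they
# can be committed to Git. This is a sketch, not part of the original script;
# the docs/ location is taken from the plan text.
import os

os.makedirs('docs', exist_ok=True)
for slug, data in wiki_content.items():
    with open(os.path.join('docs', f'{slug}.md'), 'w', encoding='utf-8') as f:
        f.write(data['content'])
    print(f'Mirrored to docs/{slug}.md')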