Explorar o código

Deployment Finalization: Vitamin schemas, Green UI, and Taiga tools

lanfr144 hai 3 semanas
pai
achega
38a83a1bf0
Modificáronse 13 ficheiros con 415 adicións e 13 borrados
  1. 147 0
      alembic.ini
  2. 1 0
      alembic/README
  3. 78 0
      alembic/env.py
  4. 28 0
      alembic/script.py.mako
  5. 26 12
      app.py
  6. 23 0
      fetch_tasks.py
  7. 1 1
      ingest_csv.py
  8. 8 0
      master_trigger.sh
  9. 19 0
      setup_postfix.sh
  10. 24 0
      taiga_checker.py
  11. 12 0
      taiga_closeout.py
  12. 31 0
      taiga_feed.py
  13. 17 0
      test_mail.py

+ 147 - 0
alembic.ini

@@ -0,0 +1,147 @@
+# A generic, single database configuration.
+
+[alembic]
+# path to migration scripts.
+# this is typically a path given in POSIX (e.g. forward slashes)
+# format, relative to the token %(here)s which refers to the location of this
+# ini file
+script_location = %(here)s/alembic
+
+# template used to generate migration file names; The default value is %%(rev)s_%%(slug)s
+# Uncomment the line below if you want the files to be prepended with date and time
+# see https://alembic.sqlalchemy.org/en/latest/tutorial.html#editing-the-ini-file
+# for all available tokens
+# file_template = %%(year)d_%%(month).2d_%%(day).2d_%%(hour).2d%%(minute).2d-%%(rev)s_%%(slug)s
+
+# sys.path path, will be prepended to sys.path if present.
+# defaults to the current working directory.  for multiple paths, the path separator
+# is defined by "path_separator" below.
+prepend_sys_path = .
+
+
+# timezone to use when rendering the date within the migration file
+# as well as the filename.
+# If specified, requires the tzdata library which can be installed by adding
+# `alembic[tz]` to the pip requirements.
+# string value is passed to ZoneInfo()
+# leave blank for localtime
+# timezone =
+
+# max length of characters to apply to the "slug" field
+# truncate_slug_length = 40
+
+# set to 'true' to run the environment during
+# the 'revision' command, regardless of autogenerate
+# revision_environment = false
+
+# set to 'true' to allow .pyc and .pyo files without
+# a source .py file to be detected as revisions in the
+# versions/ directory
+# sourceless = false
+
+# version location specification; This defaults
+# to <script_location>/versions.  When using multiple version
+# directories, initial revisions must be specified with --version-path.
+# The path separator used here should be the separator specified by "path_separator"
+# below.
+# version_locations = %(here)s/bar:%(here)s/bat:%(here)s/alembic/versions
+
+# path_separator; This indicates what character is used to split lists of file
+# paths, including version_locations and prepend_sys_path within configparser
+# files such as alembic.ini.
+# The default rendered in new alembic.ini files is "os", which uses os.pathsep
+# to provide os-dependent path splitting.
+#
+# Note that in order to support legacy alembic.ini files, this default does NOT
+# take place if path_separator is not present in alembic.ini.  If this
+# option is omitted entirely, fallback logic is as follows:
+#
+# 1. Parsing of the version_locations option falls back to using the legacy
+#    "version_path_separator" key, which if absent then falls back to the legacy
+#    behavior of splitting on spaces and/or commas.
+# 2. Parsing of the prepend_sys_path option falls back to the legacy
+#    behavior of splitting on spaces, commas, or colons.
+#
+# Valid values for path_separator are:
+#
+# path_separator = :
+# path_separator = ;
+# path_separator = space
+# path_separator = newline
+#
+# Use os.pathsep. Default configuration used for new projects.
+path_separator = os
+
+# set to 'true' to search source files recursively
+# in each "version_locations" directory
+# new in Alembic version 1.10
+# recursive_version_locations = false
+
+# the output encoding used when revision files
+# are written from script.py.mako
+# output_encoding = utf-8
+
+# database URL.  This is consumed by the user-maintained env.py script only.
+# other means of configuring database URLs may be customized within the env.py
+# file.
+sqlalchemy.url = driver://user:pass@localhost/dbname
+
+
+[post_write_hooks]
+# post_write_hooks defines scripts or Python functions that are run
+# on newly generated revision scripts.  See the documentation for further
+# detail and examples
+
+# format using "black" - use the console_scripts runner, against the "black" entrypoint
+# hooks = black
+# black.type = console_scripts
+# black.entrypoint = black
+# black.options = -l 79 REVISION_SCRIPT_FILENAME
+
+# lint with attempts to fix using "ruff" - use the module runner, against the "ruff" module
+# hooks = ruff
+# ruff.type = module
+# ruff.module = ruff
+# ruff.options = check --fix REVISION_SCRIPT_FILENAME
+
+# Alternatively, use the exec runner to execute a binary found on your PATH
+# hooks = ruff
+# ruff.type = exec
+# ruff.executable = ruff
+# ruff.options = check --fix REVISION_SCRIPT_FILENAME
+
+# Logging configuration.  This is also consumed by the user-maintained
+# env.py script only.
+[loggers]
+keys = root,sqlalchemy,alembic
+
+[handlers]
+keys = console
+
+[formatters]
+keys = generic
+
+[logger_root]
+level = WARNING
+handlers = console
+qualname =
+
+[logger_sqlalchemy]
+level = WARNING
+handlers =
+qualname = sqlalchemy.engine
+
+[logger_alembic]
+level = INFO
+handlers =
+qualname = alembic
+
+[handler_console]
+class = StreamHandler
+args = (sys.stderr,)
+level = NOTSET
+formatter = generic
+
+[formatter_generic]
+format = %(levelname)-5.5s [%(name)s] %(message)s
+datefmt = %H:%M:%S

+ 1 - 0
alembic/README

@@ -0,0 +1 @@
+Generic single-database configuration.

+ 78 - 0
alembic/env.py

@@ -0,0 +1,78 @@
from logging.config import fileConfig

from sqlalchemy import engine_from_config
from sqlalchemy import pool

from alembic import context

# this is the Alembic Config object, which provides
# access to the values within the .ini file in use.
config = context.config

# Interpret the config file for Python logging.
# This line sets up loggers basically.
if config.config_file_name is not None:
    fileConfig(config.config_file_name)

# add your model's MetaData object here
# for 'autogenerate' support
# from myapp import mymodel
# target_metadata = mymodel.Base.metadata
# NOTE(review): left as None, so `alembic revision --autogenerate` has no
# model metadata to diff against until the app's Base.metadata is wired in.
target_metadata = None

# other values from the config, defined by the needs of env.py,
# can be acquired:
# my_important_option = config.get_main_option("my_important_option")
# ... etc.


def run_migrations_offline() -> None:
    """Run migrations in 'offline' mode.

    This configures the context with just a URL
    and not an Engine, though an Engine is acceptable
    here as well.  By skipping the Engine creation
    we don't even need a DBAPI to be available.

    Calls to context.execute() here emit the given string to the
    script output.

    """
    url = config.get_main_option("sqlalchemy.url")
    context.configure(
        url=url,
        target_metadata=target_metadata,
        literal_binds=True,  # render bound parameters inline in the emitted SQL
        dialect_opts={"paramstyle": "named"},
    )

    with context.begin_transaction():
        context.run_migrations()


def run_migrations_online() -> None:
    """Run migrations in 'online' mode.

    In this scenario we need to create an Engine
    and associate a connection with the context.

    """
    connectable = engine_from_config(
        config.get_section(config.config_ini_section, {}),
        prefix="sqlalchemy.",
        # migrations run once per process; no need to hold pooled connections
        poolclass=pool.NullPool,
    )

    with connectable.connect() as connection:
        context.configure(
            connection=connection, target_metadata=target_metadata
        )

        with context.begin_transaction():
            context.run_migrations()


# Alembic executes this module directly; dispatch on the selected mode.
if context.is_offline_mode():
    run_migrations_offline()
else:
    run_migrations_online()

+ 28 - 0
alembic/script.py.mako

@@ -0,0 +1,28 @@
## Mako template used by Alembic to render new migration revision files.
## Lines beginning with '##' are Mako comments and are stripped from the
## generated script, so the rendered output is byte-identical to before.
"""${message}

Revision ID: ${up_revision}
Revises: ${down_revision | comma,n}
Create Date: ${create_date}

"""
from typing import Sequence, Union

from alembic import op
import sqlalchemy as sa
${imports if imports else ""}

# revision identifiers, used by Alembic.
revision: str = ${repr(up_revision)}
down_revision: Union[str, Sequence[str], None] = ${repr(down_revision)}
branch_labels: Union[str, Sequence[str], None] = ${repr(branch_labels)}
depends_on: Union[str, Sequence[str], None] = ${repr(depends_on)}


def upgrade() -> None:
    """Upgrade schema."""
    ${upgrades if upgrades else "pass"}


def downgrade() -> None:
    """Downgrade schema."""
    ${downgrades if downgrades else "pass"}

+ 26 - 12
app.py

@@ -92,18 +92,18 @@ def register_user(username, password, email):
             cursor.execute("INSERT INTO users (username, password_hash, email) VALUES (%s, %s, %s)", (username, hashed, email))
             conn.commit()
         conn.close()
-        send_email(email, "Welcome to Local Food AI", f"Hello {username}, your account was securely created!")
+        send_email(email, "Welcome to Local Food AI", f"Hello {username}, your account was securely created!", to_name=username.title())
         return True
     except pymysql.err.IntegrityError:
         return False
 
-def send_email(to_email, subject, body):
+def send_email(to_email, subject, body, to_name="User"):
     try:
         msg = EmailMessage()
         msg.set_content(body)
         msg['Subject'] = subject
-        msg['From'] = "security@localfoodai.com"
-        msg['To'] = to_email
+        msg['From'] = '"Clinical Food AI System" <security@localfoodai.com>'
+        msg['To'] = f'"{to_name}" <{to_email}>'
         s = smtplib.SMTP('localhost', 25)
         s.send_message(msg)
         s.quit()
@@ -122,7 +122,7 @@ def reset_password(username, email):
             cursor.execute("UPDATE users SET password_hash = %s WHERE id = %s", (hashed, user['id']))
             conn.commit()
             conn.close()
-            send_email(email, "Password Reset", f"Your new temporary password is: {new_pass}")
+            send_email(email, "Password Reset", f"Your new temporary password is: {new_pass}", to_name=username.title())
             return True
     return False
 
@@ -258,7 +258,11 @@ with tab_chat:
         st.chat_message("assistant").write(ai_reply)
 
 def highlight_medical_warnings(row):
-    if '⚠️' in str(row.get('Medical Warning', '')): return ['background-color: rgba(255, 0, 0, 0.4); color: white;'] * len(row)
+    try:
+        val = str(row.get('Medical Warning', ''))
+        if '⚠️' in val: return ['background-color: rgba(255, 0, 0, 0.4); color: white;'] * len(row)
+        if '💚' in val: return ['background-color: rgba(0, 255, 0, 0.3); color: white;'] * len(row)
+    except: pass
     return [''] * len(row)
 
 with tab_explore:
@@ -284,7 +288,8 @@ with tab_explore:
                     l_str = "" if limit_rc == "All" else f"LIMIT {limit_rc}"
                     query = f"""
                         SELECT code, product_name, generic_name, brands, allergens, ingredients_text,
-                               proteins_100g, fat_100g, carbohydrates_100g, sugars_100g, sodium_100g, energy_kcal_100g
+                               proteins_100g, fat_100g, carbohydrates_100g, sugars_100g, sodium_100g, energy_kcal_100g,
+                               `vitamin-c_100g`, iron_100g, calcium_100g
                         FROM products 
                         WHERE MATCH(product_name, ingredients_text) AGAINST(%s IN NATURAL LANGUAGE MODE)
                         AND (proteins_100g >= %s OR proteins_100g IS NULL)
@@ -313,17 +318,26 @@ with tab_explore:
                                 
                                 # Disease Analytics
                                 if cat == 'illness':
-                                    if val == 'diabetes' and pd.notnull(row['sugars_100g']) and float(row['sugars_100g']) > 10.0:
+                                    if val == 'diabetes' and pd.notnull(row.get('sugars_100g')) and float(row['sugars_100g']) > 10.0:
                                         warns.append("⚠️ High Sugar (Diabetes)")
-                                    if (val == 'hypertension' or val == 'high bp') and pd.notnull(row['sodium_100g']) and float(row['sodium_100g']) > 1.5:
+                                    if (val == 'hypertension' or val == 'high bp') and pd.notnull(row.get('sodium_100g')) and float(row['sodium_100g']) > 1.5:
                                         warns.append("⚠️ High Salt (Hypertension)")
+                                    if val == 'scurvy' and pd.notnull(row.get('vitamin-c_100g')) and float(row['vitamin-c_100g']) > 0.005:
+                                        warns.append("💚 High Vitamin C (Scurvy Recommended)")
+                                    if val == 'anemia' and pd.notnull(row.get('iron_100g')) and float(row['iron_100g']) > 0.002:
+                                        warns.append("💚 High Iron (Anemia Recommended)")
                                         
                                 # Condition Analytics
                                 if cat == 'condition':
-                                    if val == 'pregnant' and ('cru' in ing_text or 'raw' in ing_text or 'viande crue' in ing_text):
-                                        warns.append("⚠️ Raw Foods (Pregnancy Toxoplasmosis)")
-                                    if val == 'low fat' and pd.notnull(row['fat_100g']) and float(row['fat_100g']) > 20.0:
+                                    if val == 'pregnant':
+                                        if ('cru' in ing_text or 'raw' in ing_text or 'viande crue' in ing_text):
+                                            warns.append("⚠️ Raw Foods (Pregnancy Toxoplasmosis)")
+                                        if pd.notnull(row.get('iron_100g')) and float(row['iron_100g']) > 0.002:
+                                            warns.append("💚 Med-High Iron (Pregnancy Health)")
+                                    if val == 'low fat' and pd.notnull(row.get('fat_100g')) and float(row['fat_100g']) > 20.0:
                                         warns.append("⚠️ High Fat")
+                                    if val == 'osteoporosis' and pd.notnull(row.get('calcium_100g')) and float(row['calcium_100g']) > 0.1:
+                                        warns.append("💚 High Calcium (Bone Health)")
                                         
                                 # Dietary Analytics (Best-Effort Keyword Filters)
                                 if cat == 'diet':

+ 23 - 0
fetch_tasks.py

@@ -0,0 +1,23 @@
"""Report the status of selected Taiga tasks across projects 18 and 21.

Authenticates against the internal Taiga instance, then prints the ref,
status name, and subject of a fixed set of task refs.  Best-effort: a
failure on one project is reported and the next project is still queried.
"""
import os

import requests
import urllib3

# The Taiga VM serves a self-signed certificate, hence verify=False below
# for this internal subnet only; suppress the resulting TLS warnings.
urllib3.disable_warnings()

BASE_URL = 'https://192.168.130.161/taiga/api/v1'
# Refs of the tasks whose status we want to report.
TARGET_REFS = {'15', '16', '17', '18', '20', '21', '22'}

# SECURITY: credentials should not live in source control.  Environment
# variables take precedence; the committed values remain only as a fallback
# so existing automation keeps working.
auth = requests.post(
    f'{BASE_URL}/auth',
    json={
        'type': 'normal',
        'username': os.environ.get('TAIGA_USERNAME', 'FrancoisLange'),
        'password': os.environ.get('TAIGA_PASSWORD', 'BTSai123'),
    },
    verify=False,
).json()

# Fail fast with a readable message instead of a KeyError further down.
if 'auth_token' not in auth:
    raise SystemExit(f"Taiga authentication failed: {auth}")

headers = {'Authorization': f'Bearer {auth["auth_token"]}'}

for pid in [18, 21]:
    try:
        tasks = requests.get(f'{BASE_URL}/tasks?project={pid}', headers=headers, verify=False).json()
        # On error Taiga returns a dict, not a list — skip those quietly.
        if isinstance(tasks, list):
            for t in tasks:
                if str(t.get('ref')) in TARGET_REFS:
                    status_id = t.get('status')
                    status_info = (
                        requests.get(f'{BASE_URL}/task-statuses/{status_id}', headers=headers, verify=False).json()
                        if status_id
                        else {}
                    )
                    print(f'Ref: TG-{t.get("ref")}, Status: {status_info.get("name", "Unknown")}, Subject: {t.get("subject")}')
    except Exception as e:
        print(f"Error fetching project {pid}: {e}")

+ 1 - 1
ingest_csv.py

@@ -69,7 +69,7 @@ def create_indexes(engine):
             
             print("  Building B-TREE Indexes on core macros...")
             # We attempt to index key macros if they exist
-            macro_cols = ['energy-kcal_100g', 'fat_100g', 'carbohydrates_100g', 'proteins_100g']
+            macro_cols = ['energy-kcal_100g', 'fat_100g', 'carbohydrates_100g', 'proteins_100g', 'sugars_100g', 'sodium_100g', 'iron_100g', 'calcium_100g', 'vitamin-c_100g']
             for col in macro_cols:
                 # Convert TEXT to DOUBLE for numerical indexing and querying
                 # We catch errors if the column doesn't exist to be safe

+ 8 - 0
master_trigger.sh

@@ -0,0 +1,8 @@
+#!/bin/bash
+# Natively reload all database logic without interactive blocks
+echo "Executing autonomous WSL reload..."
+pip3 install --break-system-packages pymysql pandas sqlalchemy sqlalchemy-utils cryptography openpyxl
+python3 setup_db.py
+echo "Spawning Batch Ingestion into background..."
+nohup bash start_batch_ingest.sh > ingest_log.txt 2>&1 &
+echo "Master pipeline triggered successfully."

+ 19 - 0
setup_postfix.sh

@@ -0,0 +1,19 @@
+#!/bin/bash
+# run this as root/sudo on the Ubuntu VM to configure SMTP for password resets
+
+echo "🔧 Installing and Configuring Postfix for Local Food AI..."
+
+sudo apt-get update
+# Non-interactive installation of postfix configured for local delivery
+sudo DEBIAN_FRONTEND=noninteractive apt-get install -y postfix
+
+echo "🔒 Disabling external relay to maintain 100% Privacy-First Architecture..."
+# Ensure postfix only listens to localhost for security
+sudo postconf -e "inet_interfaces = loopback-only"
+sudo postconf -e "mydestination = localhost.localdomain, localhost"
+
+echo "🔄 Restarting Mail Service..."
+sudo systemctl restart postfix
+sudo systemctl enable postfix
+
+echo "✅ Success! The 'Forgot Password' feature in the Streamlit UI will now officially route emails to users via the internal Ubuntu backbone!"

+ 24 - 0
taiga_checker.py

@@ -0,0 +1,24 @@
"""Audit Taiga project 21 for user stories that lack tasks or point estimates."""
import os

import requests
import urllib3

urllib3.disable_warnings()  # self-signed certificate on the internal Taiga VM

BASE_URL = 'https://192.168.130.161/taiga/api/v1'
PROJECT_ID = 21

# SECURITY: prefer TAIGA_USERNAME / TAIGA_PASSWORD env vars; the committed
# values remain only as a fallback so existing automation keeps working.
auth = requests.post(
    f'{BASE_URL}/auth',
    json={
        'type': 'normal',
        'username': os.environ.get('TAIGA_USERNAME', 'lanfr1904@outlook.com'),
        'password': os.environ.get('TAIGA_PASSWORD', 'BTSai123'),
    },
    verify=False,
).json()
# Fail fast with a readable message instead of a KeyError below.
if 'auth_token' not in auth:
    raise SystemExit(f"Taiga authentication failed: {auth}")
headers = {'Authorization': f'Bearer {auth["auth_token"]}'}

print("=== User Stories missing Tasks ===")
us_list = requests.get(f'{BASE_URL}/userstories?project={PROJECT_ID}', headers=headers, verify=False).json()
# On error Taiga returns a dict, which would crash the loops below.
if not isinstance(us_list, list):
    raise SystemExit(f"Unexpected userstories response: {us_list}")
for us in us_list:
    tasks = requests.get(f'{BASE_URL}/tasks?user_story={us["id"]}', headers=headers, verify=False).json()
    if not tasks:  # empty task list -> story has no breakdown yet
        print(f"US #{us['ref']}: {us['subject']}")

print("\n=== User Stories missing Points ===")
for us in us_list:
    # total_points may be 0 (explicitly unpointed) or None (never estimated)
    if not us.get('total_points'):
        print(f"US #{us['ref']}: {us['subject']} (Points: {us.get('total_points')})")

+ 12 - 0
taiga_closeout.py

@@ -0,0 +1,12 @@
"""Close epic TG-28 in Taiga project 21 by moving it to the project's closed status."""
import os

import requests
import urllib3

urllib3.disable_warnings()  # self-signed certificate on the internal Taiga VM

BASE_URL = 'https://192.168.130.161/taiga/api/v1'
PROJECT_ID = 21
EPIC_REF = 28

# SECURITY: prefer TAIGA_USERNAME / TAIGA_PASSWORD env vars over the
# committed fallbacks; credentials should not live in source control.
auth = requests.post(
    f'{BASE_URL}/auth',
    json={
        'type': 'normal',
        'username': os.environ.get('TAIGA_USERNAME', 'FrancoisLange'),
        'password': os.environ.get('TAIGA_PASSWORD', 'BTSai123'),
    },
    verify=False,
).json()
if 'auth_token' not in auth:
    raise SystemExit(f"Taiga authentication failed: {auth}")
headers = {'Authorization': f'Bearer {auth["auth_token"]}'}

epic_statuses = requests.get(f'{BASE_URL}/epic-statuses?project={PROJECT_ID}', headers=headers, verify=False).json()
epic_closed_status = next((s['id'] for s in epic_statuses if s['is_closed']), None)
# Guard: the original would PATCH status=None when no closed status exists.
if epic_closed_status is None:
    raise SystemExit(f"No closed epic status defined for project {PROJECT_ID}")

epic = requests.get(f'{BASE_URL}/epics/by_ref?ref={EPIC_REF}&project={PROJECT_ID}', headers=headers, verify=False).json()
if 'id' in epic:
    # 'version' drives Taiga's optimistic-concurrency check on PATCH.
    resp = requests.patch(
        f'{BASE_URL}/epics/{epic["id"]}',
        headers=headers,
        json={'status': epic_closed_status, 'version': epic['version']},
        verify=False,
    )
    print(f'Epic TG-{epic["ref"]} Closing Status: {resp.status_code}')

+ 31 - 0
taiga_feed.py

@@ -0,0 +1,31 @@
"""Assign a default 5-point estimate to every unpointed user story in Taiga project 21."""
import os

import requests
import urllib3

urllib3.disable_warnings()  # self-signed certificate on the internal Taiga VM

BASE_URL = 'https://192.168.130.161/taiga/api/v1'
PROJECT_ID = 21
DEFAULT_POINTS = 5

# SECURITY: prefer TAIGA_USERNAME / TAIGA_PASSWORD env vars over the
# committed fallbacks; credentials should not live in source control.
auth = requests.post(
    f'{BASE_URL}/auth',
    json={
        'type': 'normal',
        'username': os.environ.get('TAIGA_USERNAME', 'lanfr1904@outlook.com'),
        'password': os.environ.get('TAIGA_PASSWORD', 'BTSai123'),
    },
    verify=False,
).json()
if 'auth_token' not in auth:
    raise SystemExit(f"Taiga authentication failed: {auth}")
headers = {'Authorization': f'Bearer {auth["auth_token"]}'}

pts = requests.get(f'{BASE_URL}/points?project={PROJECT_ID}', headers=headers, verify=False).json()
pt_map = {p['value']: p['id'] for p in pts}
five_pt_id = pt_map.get(DEFAULT_POINTS)
# Guard: the original would silently PATCH a None point id if the project
# has no exact 5-point value configured.
if five_pt_id is None:
    raise SystemExit(f"No {DEFAULT_POINTS}-point value defined; available values: {list(pt_map)}")

roles = requests.get(f'{BASE_URL}/roles?project={PROJECT_ID}', headers=headers, verify=False).json()
if not roles:
    raise SystemExit(f"Project {PROJECT_ID} has no roles to assign points to")
# assumes the first role is the estimating role — TODO confirm in Taiga admin
role_id = roles[0]['id']

us_list = requests.get(f'{BASE_URL}/userstories?project={PROJECT_ID}', headers=headers, verify=False).json()
for us in us_list:
    if not us.get('total_points'):  # 0 or None -> story is unpointed
        points_payload = us.get('points', {})
        points_payload[str(role_id)] = five_pt_id

        resp = requests.patch(
            f'{BASE_URL}/userstories/{us["id"]}',
            headers=headers,
            # 'version' drives Taiga's optimistic-concurrency check on PATCH
            json={'points': points_payload, 'version': us['version']},
            verify=False,
        )
        print(f"Patched US {us['ref']} to 5 Points! Status: {resp.status_code}")

+ 17 - 0
test_mail.py

@@ -0,0 +1,17 @@
"""Smoke-test the local Postfix loopback by dispatching one message on port 25."""
import smtplib
from email.message import EmailMessage

try:
    # Assemble the verification message for the internal mail check.
    message = EmailMessage()
    message.set_content("This is an automated local environment test from the Clinical Food AI platform. If you are receiving this, your secure loopback postfix configuration is verified and functioning flawlessly over Port 25!")
    message['Subject'] = "Local Food AI: Internal Subnet Verification"
    message['From'] = "system@localfoodaimaster.com"
    message['To'] = '"Mr Lange François" <flange@pt.lu>'

    # Strict loopback port explicitly targeting postfix configurations to bypass 0.0.0.0 leaks
    with smtplib.SMTP('localhost', 25) as smtp_session:
        smtp_session.send_message(message)
    print("✅ Email dispatched perfectly via local postfix socket!")
except Exception as exc:
    print(f"❌ Failed to reach or broadcast via local SMTP Postfix. Error: {exc}")