ingest_csv.py

import os
import sys
import urllib.parse

import pandas as pd
import myloginpath
from sqlalchemy import create_engine, text
from sqlalchemy.types import VARCHAR, TEXT, DOUBLE


def get_loader_engine():
    """Build a SQLAlchemy engine for food_db from the 'app_loader' MySQL login path."""
    try:
        conf = myloginpath.parse('app_loader')
        user = conf.get('user')
        password = urllib.parse.quote_plus(conf.get('password'))
        host = conf.get('host', '127.0.0.1')
        database = 'food_db'
        conn_str = f"mysql+pymysql://{user}:{password}@{host}/{database}?charset=utf8mb4"
        return create_engine(conn_str)
    except Exception as e:
        print(f"❌ Failed to parse myloginpath or create engine: {e}")
        sys.exit(1)
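
# Assumed one-time setup (not performed by this script): the 'app_loader'
# login path is created with MySQL's config editor and stored in
# ~/.mylogin.cnf, e.g.
#   mysql_config_editor set --login-path=app_loader \
#       --host=127.0.0.1 --user=<loader_user> --password
# myloginpath.parse() then reads those credentials back at runtime.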


def ingest_file(filename, engine):
    """Stream a tab-separated OpenFoodFacts dump into the grouped tables in chunks."""
    if not os.path.exists(filename):
        print(f"File {filename} not found locally.")
        return False
    print(f"\n🚀 Found {filename}! Starting grouped vertical partition ingestion...")
    chunk_size = 10000
    total_processed = 0
    # Vertical partitions: each group becomes its own table, all keyed by `code`
    groups = {
        'products_core': ['code', 'product_name', 'generic_name', 'brands', 'ingredients_text'],
        'products_allergens': ['code', 'allergens'],
        'products_macros': ['code', 'energy-kcal_100g', 'proteins_100g', 'fat_100g', 'carbohydrates_100g', 'sugars_100g', 'fiber_100g', 'sodium_100g', 'salt_100g', 'cholesterol_100g'],
        'products_vitamins': ['code', 'vitamin-a_100g', 'vitamin-b1_100g', 'vitamin-b2_100g', 'vitamin-pp_100g', 'vitamin-b6_100g', 'vitamin-b9_100g', 'vitamin-b12_100g', 'vitamin-c_100g', 'vitamin-d_100g', 'vitamin-e_100g', 'vitamin-k_100g'],
        'products_minerals': ['code', 'calcium_100g', 'iron_100g', 'magnesium_100g', 'potassium_100g', 'zinc_100g'],
    }
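
    # Assumption (the tables are not created here): each target table already
    # exists with `code` as its primary key, along the lines of
    #   CREATE TABLE products_minerals (
    #       `code` VARCHAR(50) PRIMARY KEY,
    #       `calcium_100g` DOUBLE, `iron_100g` DOUBLE, ...
    #   );
    # The INSERT IGNORE below relies on that key to skip already-loaded codes.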

    # Union of every column any group needs, used to backfill columns
    # that are absent from a given chunk
    all_required_cols = list({col for cols in groups.values() for col in cols})
    for chunk in pd.read_csv(filename, sep='\t', dtype=str, chunksize=chunk_size,
                             on_bad_lines='skip', low_memory=False, encoding='utf-8'):
        try:
            # Skip chunks that lack the key column entirely, then drop rows
            # with missing or duplicate codes
            if 'code' not in chunk.columns:
                continue
            df = chunk.dropna(subset=['code']).drop_duplicates(subset=['code']).copy()
            # Ensure every required column exists in the chunk (fill with None if missing)
            for col in all_required_cols:
                if col not in df.columns:
                    df[col] = None
            for table_name, columns in groups.items():
                slice_df = df[columns].copy()
                # Cast datatypes: core and allergens stay TEXT, the rest become DOUBLE
                if table_name in ('products_core', 'products_allergens'):
                    sql_dtypes = {col: TEXT() for col in columns if col != 'code'}
                else:
                    # Convert to numeric safely; unparseable values become NULL
                    for col in columns:
                        if col != 'code':
                            slice_df[col] = pd.to_numeric(slice_df[col], errors='coerce')
                    sql_dtypes = {col: DOUBLE() for col in columns if col != 'code'}
                sql_dtypes['code'] = VARCHAR(50)

                # Stage the slice in a temp table, then INSERT IGNORE into the
                # final table so duplicates across chunks and files are skipped
                temp_name = f"temp_{table_name}"
                slice_df.to_sql(temp_name, con=engine, if_exists='replace', index=False, dtype=sql_dtypes)
                with engine.begin() as conn:
                    cols_str = ", ".join(f"`{c}`" for c in columns)
                    conn.execute(text(f"INSERT IGNORE INTO {table_name} ({cols_str}) SELECT {cols_str} FROM {temp_name}"))
                    conn.execute(text(f"DROP TABLE IF EXISTS {temp_name}"))

            total_processed += len(df)
            print(f" Appended {total_processed} rows total into grouped tables...", end="\r")
        except Exception as e:
            # Exception, not BaseException, so Ctrl-C still aborts the run
            print(f"\n [Warning] Chunk skipped due to error: {e}")

    print(f"\n✅ Finished importing {filename}.")
    return True
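
# The dump filenames below match OpenFoodFacts' published CSV exports
# (https://static.openfoodfacts.org/data/); the script assumes they were
# downloaded (and un-gzipped) into the working directory beforehand.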

if __name__ == "__main__":
    print("Initiating OpenFoodFacts Grouped Vertical Ingestion Process...")
    engine = get_loader_engine()
    processed_en = ingest_file('en.openfoodfacts.org.products.csv', engine)
    processed_fr = ingest_file('fr.openfoodfacts.org.products.csv', engine)
    if not processed_en and not processed_fr:
        print("\n❌ Could not find CSVs.")
    else:
        print("\n🎉 Full database reload complete! Ready for AI RAG.")
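
# A quick post-load sanity check (assumes the mysql CLI is installed):
#   mysql --login-path=app_loader food_db -e "SELECT COUNT(*) FROM products_core;"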