# ingest_csv.py

import os
import sys
import urllib.parse

import pandas as pd
import myloginpath
from sqlalchemy import create_engine, text
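
# Requires: pandas, myloginpath, SQLAlchemy, and PyMySQL (the driver named in the
# connection URL below), e.g. `pip install pandas myloginpath SQLAlchemy PyMySQL`.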


def get_loader_engine():
    try:
        conf = myloginpath.parse('app_loader')
        user = conf.get('user')
        password = urllib.parse.quote_plus(conf.get('password'))
        host = conf.get('host', '127.0.0.1')
        database = 'food_db'
        # Build a strict SQLAlchemy PyMySQL connection string
        conn_str = f"mysql+pymysql://{user}:{password}@{host}/{database}?charset=utf8mb4"
        return create_engine(conn_str)
    except Exception as e:
        print(f"❌ Failed to parse myloginpath or create engine: {e}")
        sys.exit(1)
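
# The 'app_loader' login path above is read from MySQL's encrypted ~/.mylogin.cnf.
# If it does not exist yet, it can be created with the stock MySQL tooling, e.g.:
#   mysql_config_editor set --login-path=app_loader --host=127.0.0.1 --user=<user> --password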


def ingest_file(filename, engine):
    if not os.path.exists(filename):
        print(f"File {filename} not found locally.")
        return False

    print(f"\n🚀 Found {filename}! Starting extreme batch ingestion...")
    chunk_size = 5000
    total_processed = 0

    # Read dynamically without filtering. dtype=str keeps every column as raw text,
    # and the OpenFoodFacts export is tab-separated despite the .csv extension.
    # utf-8 is forced to prevent French accent corruption on Windows OS defaults.
    for chunk in pd.read_csv(filename, sep='\t', dtype=str, chunksize=chunk_size,
                             on_bad_lines='skip', low_memory=False, encoding='utf-8'):
        try:
            # Every table slice below is keyed on 'code', so skip chunks without it
            if 'code' not in chunk.columns:
                continue
            # Drop duplicates by code natively
            df = chunk.drop_duplicates(subset=['code'])
            # Eliminate completely empty columns to save storage
            df = df.dropna(axis=1, how='all')

            # Segment the dataframe into groups of 50 columns each to stay under
            # InnoDB's row-size constraints (e.g. ~200 columns land in products_1..products_4).
            # Named group_size, since chunk_size is already bound to the row batch above.
            cols = list(df.columns)
            cols.remove('code')
            group_size = 50
            col_groups = [cols[i:i + group_size] for i in range(0, len(cols), group_size)]
            for i, col_group in enumerate(col_groups):
                # Ensure 'code' maps across every single table
                table_name = f'products_{i + 1}'
                df_slice = df[['code'] + col_group].copy()
                df_slice.to_sql(table_name, con=engine, if_exists='append', index=False)
            # Count each source row once, after all of its column segments are written
            total_processed += len(df)
            print(f" Successfully appended {total_processed} rows (Dynamic schema)...", end="\r")
        except Exception as e:
            if "Duplicate entry" in str(e):
                pass
            else:
                print(f"\n [Warning] Chunk skipped due to internal structural error: {e}")

    print(f"\n✅ Finished importing {filename}.")
    return True


def create_indexes(engine):
    print("\n🛠️ Creating performance indexes on the newly generated tables...")
    # B-tree primary keys are added post-ingestion so the bulk inserts above stay fast
    try:
        with engine.begin() as connection:
            print(" Building Core Architecture on Partitions...")
            # Enforce primary keys on the first 4 partitions
            for i in range(1, 5):
                try:
                    connection.execute(text(f"ALTER TABLE products_{i} MODIFY code VARCHAR(50);"))
                    connection.execute(text(f"ALTER TABLE products_{i} ADD PRIMARY KEY (code);"))
                except Exception:
                    # Table may not exist, or the key is already in place from a prior run
                    pass
            print(" Building Dynamic MySQL View...")
            # We build a massive join view so the app doesn't need to know about the segments
            try:
                connection.execute(text("""
                    CREATE VIEW products AS
                    SELECT p1.*,
                           p2.energy_100g, p2.`energy-kcal_100g`, p2.proteins_100g, p2.fat_100g,
                           p2.carbohydrates_100g, p2.sugars_100g, p2.salt_100g, p2.sodium_100g,
                           p2.fiber_100g,
                           p3.iron_100g, p3.calcium_100g, p3.`vitamin-c_100g`, p3.`vitamin-d_100g`
                    FROM products_1 p1
                    LEFT JOIN products_2 p2 ON p1.code = p2.code
                    LEFT JOIN products_3 p3 ON p1.code = p3.code
                """))
            except Exception:
                # The view may already exist from a previous run
                pass
        print("✅ Indexing Complete!")
    except Exception as e:
        print(f"❌ Indexing encountered an issue: {e}")
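

# Optional minimal sketch: a FULLTEXT index for keyword search over product names.
# InnoDB (MySQL 5.6+) supports this via ALTER TABLE. 'product_name' is an assumed
# OpenFoodFacts column name; adjust it to whatever actually landed in products_1.
def create_fulltext_index(engine):
    try:
        with engine.begin() as connection:
            connection.execute(text(
                "ALTER TABLE products_1 ADD FULLTEXT INDEX ft_product_name (product_name);"
            ))
        print("✅ FULLTEXT index ready!")
    except Exception as e:
        print(f" [Warning] FULLTEXT index skipped: {e}")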


if __name__ == "__main__":
    print("Initiating OpenFoodFacts CSV Ingestion Process...")
    engine = get_loader_engine()
    processed_en = ingest_file('en.openfoodfacts.org.products.csv', engine)
    processed_fr = ingest_file('fr.openfoodfacts.org.products.csv', engine)
    if not processed_en and not processed_fr:
        print("\n❌ Could not find either 'en.openfoodfacts.org.products.csv' or 'fr.openfoodfacts.org.products.csv'.")
        print("Please download them directly into the root folder and run this script again.")
    else:
        # Build indexes now that all data is appended!
        create_indexes(engine)
        print("\n🎉 Full database reload and indexing complete! Ready for AI RAG.")
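
# Example usage once the view exists: the app can query one logical table (column
# names follow the OpenFoodFacts export; swap in any barcode for <barcode>):
#   SELECT product_name, `energy-kcal_100g`, proteins_100g
#   FROM products WHERE code = '<barcode>';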