# ingest_csv.py

import os
import sys
import urllib.parse

import myloginpath
import pandas as pd
from sqlalchemy import create_engine, text
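
# The 'app_loader' credentials below come from MySQL's encrypted login-path
# file (~/.mylogin.cnf), parsed by the myloginpath package. A sketch of how
# such an entry might be created (assuming the MySQL client tools are
# installed; the user name here is illustrative):
#
#   mysql_config_editor set --login-path=app_loader \
#       --host=127.0.0.1 --user=loader --password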

def get_loader_engine():
    """Build a SQLAlchemy engine for food_db from the 'app_loader' login path."""
    try:
        conf = myloginpath.parse('app_loader')
        user = conf.get('user')
        password = urllib.parse.quote_plus(conf.get('password'))
        host = conf.get('host', '127.0.0.1')
        database = 'food_db'
        # Build a strict SQLAlchemy PyMySQL connection string.
        conn_str = f"mysql+pymysql://{user}:{password}@{host}/{database}?charset=utf8mb4"
        return create_engine(conn_str)
    except Exception as e:
        print(f"❌ Failed to parse myloginpath or create engine: {e}")
        sys.exit(1)
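
# A minimal connectivity check (hypothetical, not called by the script):
# running `SELECT 1` confirms the login path and database are reachable
# before the long ingestion starts.
#
#   with get_loader_engine().connect() as conn:
#       assert conn.execute(text("SELECT 1")).scalar() == 1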

def ingest_file(filename, engine):
    """Stream a tab-separated OpenFoodFacts export into the products table."""
    if not os.path.exists(filename):
        print(f"File {filename} not found locally.")
        return False
    print(f"\n🚀 Found {filename}! Starting extreme batch ingestion...")
    chunk_size = 5000
    total_processed = 0
    # Read all columns as-is, without filtering. dtype=str keeps every column
    # as text; the real typing happens later in create_indexes().
    for chunk in pd.read_csv(filename, sep='\t', dtype=str, chunksize=chunk_size,
                             on_bad_lines='skip', low_memory=False):
        try:
            # Drop duplicate barcodes within the chunk (cross-chunk duplicates
            # are only rejected once the primary key exists).
            if 'code' in chunk.columns:
                df = chunk.drop_duplicates(subset=['code'])
            else:
                df = chunk
            df.to_sql('products', con=engine, if_exists='append', index=False)
            total_processed += len(df)
            print(f" Successfully appended {total_processed} rows (dynamic schema)...", end="\r")
        except Exception as e:
            # On a re-run, a chunk containing already-loaded rows fails as a
            # whole with "Duplicate entry"; skip it silently.
            if "Duplicate entry" in str(e):
                pass
            else:
                print(f"\n [Warning] Chunk skipped due to internal structural error: {e}")
    print(f"\n✅ Finished importing {filename}.")
    return True
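
# Throughput note: to_sql() also accepts method='multi', which packs many
# rows into each INSERT statement. Whether it helps depends on the server
# and row width, so treat this variant as an optional experiment:
#
#   df.to_sql('products', con=engine, if_exists='append',
#             index=False, method='multi', chunksize=1000)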

def create_indexes(engine):
    """Add primary key, fulltext, and B-TREE indexes after ingestion."""
    print("\n🛠️ Creating performance indexes on the newly generated table...")
    # B-TREE and FULLTEXT indexes are created post-ingestion for speed.
    try:
        with engine.begin() as connection:
            print(" Building primary key on `code`...")
            # pandas created `code` as a TEXT field, but MySQL cannot use a
            # TEXT column as a PRIMARY KEY without a length constraint, so
            # convert it to VARCHAR(50) first.
            connection.execute(text("ALTER TABLE products MODIFY code VARCHAR(50);"))
            connection.execute(text("ALTER TABLE products ADD PRIMARY KEY (code);"))
            print(" Building fulltext indexes...")
            connection.execute(text(
                "CREATE FULLTEXT INDEX ft_idx_search "
                "ON products(product_name, ingredients_text, brands);"
            ))
            print(" Building B-TREE indexes on core macros...")
            # Index the key macro columns if they exist.
            macro_cols = ['energy-kcal_100g', 'fat_100g', 'carbohydrates_100g', 'proteins_100g']
            for col in macro_cols:
                # Convert TEXT to DOUBLE for numerical indexing and querying;
                # errors (e.g. a missing column) are swallowed to stay safe.
                try:
                    connection.execute(text(f"ALTER TABLE products MODIFY `{col}` DOUBLE;"))
                    connection.execute(text(
                        f"CREATE INDEX idx_{col.replace('-', '_')} ON products(`{col}`);"
                    ))
                except Exception:
                    pass
        print("✅ Indexing complete!")
    except Exception as e:
        print(f"❌ Indexing encountered an issue: {e}")

if __name__ == "__main__":
    print("Initiating OpenFoodFacts CSV Ingestion Process...")
    engine = get_loader_engine()
    processed_en = ingest_file('en.openfoodfacts.org.products.csv', engine)
    processed_fr = ingest_file('fr.openfoodfacts.org.products.csv', engine)
    if not processed_en and not processed_fr:
        print("\n❌ Could not find either 'en.openfoodfacts.org.products.csv' "
              "or 'fr.openfoodfacts.org.products.csv'.")
        print("Please download them directly into the root folder and run this script again.")
    else:
        # Build indexes now that all data is appended.
        create_indexes(engine)
        print("\n🎉 Full database reload and indexing complete! Ready for AI RAG.")