瀏覽代碼

TG-85: add graceful 404 handler for Ollama background pull

lanfr144 1 周之前
父節點
當前提交
9ece66be63
共有 1 個文件被更改，包括 10 次插入和 2 次刪除
  1. 10 2
      app.py

+ 10 - 2
app.py

@@ -573,7 +573,11 @@ with tab_explore:
                                     response_stream = ollama.chat(model='llama3.2:1b', messages=[{'role': 'user', 'content': eval_prompt}], stream=True)
                                     st.write_stream(chunk['message']['content'] for chunk in response_stream)
                                 except Exception as e:
-                                    st.error(f"AI Evaluation Failed: {e}")
+                                    error_msg = str(e).lower()
+                                    if "404" in error_msg or "not found" in error_msg:
+                                        st.warning("⚠️ The AI engine is currently downloading its core models in the background. Please wait a minute and try again!")
+                                    else:
+                                        st.error(f"AI Evaluation Failed: {e}")
                     else:
                         st.warning("No products found matching those strict terms.")
             except Exception as e: st.error(f"SQL/Pandas Error: {e}")
@@ -772,6 +776,10 @@ with tab_planner:
                 st.markdown("### 📋 Your Professional Meal Plan")
                 st.write_stream(chunk['message']['content'] for chunk in response_stream)
             except Exception as e:
-                st.error(f"AI Generation Failed: {e}")
+                error_msg = str(e).lower()
+                if "404" in error_msg or "not found" in error_msg:
+                    st.warning("⚠️ The AI engine is currently downloading its core models in the background. Please wait a minute and try again!")
+                else:
+                    st.error(f"AI Generation Failed: {e}")
 
 if conn_reader: conn_reader.close()