課程相關code我都上完到github空間中:請 git clone https://github.com/kevin801221/AgenticU-The-Modular-Teaching-Hub-for-Modern-LLM-Agent-Frameworks.git 去下載相關程式碼。(可以幫忙點個星星唷!) LangChain 嵌入模型實戰指南 📚 參考資源 官方文檔 LangChain Embeddings 官方文檔 Embedding Models 整合列表 重要工具與平台 OpenAI Embeddings API - 商業級嵌入服務 Hugging Face Model Hub - 開源嵌入模型庫 MTEB Leaderboard - 嵌入模型效能排行榜 Ollama - 本地模型部署工具 Sentence Transformers - 語句嵌入專用庫 研究與基準 CLIP 論文 - 多模態嵌入的基礎 E5 系列模型 - 指令式嵌入模型 BGE 系列模型 - 中英文嵌入模型 目錄 嵌入基礎概念 模型選擇指南 商業嵌入服務 開源嵌入模型 本地部署方案 多模態嵌入 效能最佳化 實際應用案例 嵌入基礎概念 什麼是嵌入? 嵌入(Embedding)是將文字、圖像等高維度資料映射到低維度向量空間的技術,讓電腦能理解語意關係。 # 嵌入的基本概念 text_1 = "貓是可愛的動物" text_2 = "狗是忠誠的寵物" text_3 = "今天天氣很好" # 轉換為向量後 vector_1 = [0.2, 0.8, 0.1, ...] # 動物相關 vector_2 = [0.3, 0.7, 0.2, ...] # 動物相關(相似) vector_3 = [0.9, 0.1, 0.8, ...] # 天氣相關(不相似) 核心應用場景 def demonstrate_embedding_applications(): """展示嵌入的主要應用""" applications = { "語意搜尋": { "描述": "找到意思相近但用詞不同的內容", "範例": "搜尋'汽車' → 找到'轎車'、'車輛'、'交通工具'" }, "文件分類": { "描述": "根據內容自動分類文檔", "範例": "新聞分類、郵件分類、產品分類" }, "推薦系統": { "描述": "找到相似的內容進行推薦", "範例": "相似文章推薦、產品推薦" }, "異常檢測": { "描述": "識別與正常內容差異很大的文字", "範例": "垃圾郵件檢測、內容審核" } } return applications 向量相似度計算 import numpy as np def calculate_similarity(vector1, vector2, method="cosine"): """計算向量相似度""" if method == "cosine": # 餘弦相似度:最常用,範圍 -1 到 1 dot_product = np.dot(vector1, vector2) norm_a = np.linalg.norm(vector1) norm_b = np.linalg.norm(vector2) return dot_product / (norm_a * norm_b) elif method == "euclidean": # 歐幾里得距離:越小越相似 return np.linalg.norm(vector1 - vector2) elif method == "dot_product": # 點積:需要向量已正規化 return np.dot(vector1, vector2) # 使用範例 def compare_texts_with_embeddings(embedding_model, texts): """比較文字的語意相似度""" # 獲取嵌入向量 vectors = embedding_model.embed_documents(texts) # 計算兩兩相似度 similarity_matrix = [] for i, vec1 in enumerate(vectors): row = [] for j, vec2 in enumerate(vectors): similarity = calculate_similarity(vec1, vec2) row.append(similarity) similarity_matrix.append(row) return similarity_matrix 模型選擇指南 選擇決策樹 考量因素 商業服務 開源模型 本地部署 成本 按使用量付費 免費 硬體成本 效能 
通常最佳 中等到優秀 取決於硬體 隱私 資料上傳雲端 可控制 完全本地 維護 無需維護 需要更新 需要管理 客製化 有限 可微調 完全控制 模型比較表 模型類型 代表模型 維度 語言支援 最適場景 OpenAI text-embedding-3-large 3072 多語言 高品質商業應用 多語言E5 multilingual-e5-large 1024 100+ 語言 多語言場景 BGE bge-large-zh-v1.5 1024 中英文 中文優化 本地模型 nomic-embed-text 768 英文為主 隱私優先 商業嵌入服務 OpenAI Embeddings 最受歡迎的商業嵌入服務,提供優秀的效能和易用性: from langchain_openai import OpenAIEmbeddings import os # 基本配置 openai_embeddings = OpenAIEmbeddings( model="text-embedding-3-large", # 或 text-embedding-3-small api_key=os.getenv("OPENAI_API_KEY") ) # 基本使用 def basic_openai_embedding_usage(): """OpenAI 嵌入基本使用""" # 單個查詢 query = "什麼是機器學習?" query_vector = openai_embeddings.embed_query(query) # 多個文檔 documents = [ "機器學習是人工智能的一個分支。", "深度學習使用神經網路進行學習。", "自然語言處理專注於理解人類語言。" ] doc_vectors = openai_embeddings.embed_documents(documents) print(f"查詢向量維度: {len(query_vector)}") print(f"文檔向量數量: {len(doc_vectors)}") return query_vector, doc_vectors # 進階配置 def advanced_openai_config(): """OpenAI 嵌入進階配置""" # 調整向量維度(節省成本) small_dim_embeddings = OpenAIEmbeddings( model="text-embedding-3-large", dimensions=1536, # 原始 3072 壓縮到 1536 ) # 批次處理設置 batch_embeddings = OpenAIEmbeddings( model="text-embedding-3-large", chunk_size=1000, # 每批處理的文檔數 max_retries=3, # 重試次數 request_timeout=60 # 超時設置 ) return small_dim_embeddings, batch_embeddings # 成本計算 def calculate_openai_cost(text_list, model="text-embedding-3-large"): """計算 OpenAI 嵌入成本""" import tiktoken # 獲取對應的編碼器 encoding = tiktoken.encoding_for_model("gpt-4") # 通用編碼器 total_tokens = 0 for text in text_list: tokens = len(encoding.encode(text)) total_tokens += tokens # 價格表(2024年價格,請查看最新價格) prices = { "text-embedding-3-large": 0.00013 / 1000, # 每1K tokens "text-embedding-3-small": 0.00002 / 1000, "text-embedding-ada-002": 0.00010 / 1000 } cost = total_tokens * prices.get(model, prices["text-embedding-3-large"]) return { "total_tokens": total_tokens, "estimated_cost": cost, "cost_per_1k_tokens": prices[model] * 1000 } 批次處理最佳化 def batch_embedding_optimizer(): """批次嵌入最佳化""" class
BatchEmbeddingProcessor: def __init__(self, embedding_model, batch_size=100): self.embedding_model = embedding_model self.batch_size = batch_size def process_large_dataset(self, documents): """處理大型文檔集合""" all_embeddings = [] total_batches = (len(documents) + self.batch_size - 1) // self.batch_size print(f"處理 {len(documents)} 個文檔,分為 {total_batches} 批次") for i in range(0, len(documents), self.batch_size): batch = documents[i:i + self.batch_size] batch_num = i // self.batch_size + 1 print(f"處理批次 {batch_num}/{total_batches}...") try: batch_embeddings = self.embedding_model.embed_documents(batch) all_embeddings.extend(batch_embeddings) except Exception as e: print(f"批次 {batch_num} 處理失敗: {e}") # 添加空向量作為佔位符 all_embeddings.extend([None] * len(batch)) return all_embeddings def process_with_progress(self, documents): """帶進度條的處理""" try: from tqdm import tqdm all_embeddings = [] for i in tqdm(range(0, len(documents), self.batch_size), desc="嵌入處理"): batch = documents[i:i + self.batch_size] batch_embeddings = self.embedding_model.embed_documents(batch) all_embeddings.extend(batch_embeddings) return all_embeddings except ImportError: print("tqdm 未安裝,使用基本處理...") return self.process_large_dataset(documents) return BatchEmbeddingProcessor # 記憶體監控 def monitor_embedding_memory(): """監控嵌入過程的記憶體使用""" import psutil import os def memory_monitor_decorator(func): def wrapper(*args, **kwargs): process = psutil.Process(os.getpid()) # 記錄開始狀態 start_memory = process.memory_info().rss / 1024 / 1024 # MB # 執行函數 result = func(*args, **kwargs) # 記錄結束狀態 end_memory = process.memory_info().rss / 1024 / 1024 # MB memory_increase = end_memory - start_memory print(f"記憶體使用: {start_memory:.1f} MB → {end_memory:.1f} MB") print(f"記憶體增長: {memory_increase:.1f} MB") return result return wrapper return memory_monitor_decorator 並行處理 import concurrent.futures import threading from typing import List, Callable class ParallelEmbeddingProcessor: """並行嵌入處理器""" def __init__(self, embedding_models: List, max_workers: int 
= 4): self.embedding_models = embedding_models self.max_workers = max_workers self.lock = threading.Lock() def process_with_multiple_models(self, documents: List[str]): """使用多個模型並行處理""" results = {} def process_with_model(model_info): model_name, model = model_info try: embeddings = model.embed_documents(documents) with self.lock: results[model_name] = { "embeddings": embeddings, "dimension": len(embeddings[0]) if embeddings else 0, "success": True } except Exception as e: with self.lock: results[model_name] = { "error": str(e), "success": False } # 並行執行 with concurrent.futures.ThreadPoolExecutor(max_workers=self.max_workers) as executor: model_items = [(name, model) for name, model in self.embedding_models.items()] executor.map(process_with_model, model_items) return results def distributed_processing(self, documents: List[str], chunk_size: int = 1000): """分散式文檔處理""" # 將文檔分塊 document_chunks = [ documents[i:i + chunk_size] for i in range(0, len(documents), chunk_size) ] all_results = [] def process_chunk(chunk_info): chunk_idx, chunk = chunk_info # 選擇模型(輪詢) model_names = list(self.embedding_models.keys()) selected_model_name = model_names[chunk_idx % len(model_names)] selected_model = self.embedding_models[selected_model_name] try: embeddings = selected_model.embed_documents(chunk) return { "chunk_idx": chunk_idx, "embeddings": embeddings, "model_used": selected_model_name, "success": True } except Exception as e: return { "chunk_idx": chunk_idx, "error": str(e), "success": False } # 並行處理塊 with concurrent.futures.ThreadPoolExecutor(max_workers=self.max_workers) as executor: chunk_items = [(i, chunk) for i, chunk in enumerate(document_chunks)] chunk_results = list(executor.map(process_chunk, chunk_items)) # 重新組合結果 chunk_results.sort(key=lambda x: x["chunk_idx"]) final_embeddings = [] for result in chunk_results: if result["success"]: final_embeddings.extend(result["embeddings"]) else: print(f"塊 {result['chunk_idx']} 處理失敗: {result['error']}") return final_embeddings 
實際應用案例 案例 1:智能文檔搜尋系統 def intelligent_document_search_system(): """智能文檔搜尋系統""" import faiss import numpy as np class DocumentSearchEngine: def __init__(self, embedding_model): self.embedding_model = embedding_model self.documents = [] self.embeddings = [] self.index = None def add_documents(self, documents: List[str]): """添加文檔到搜尋引擎""" print(f"正在嵌入 {len(documents)} 個文檔...") # 批次處理嵌入 batch_processor = batch_embedding_optimizer() processor = batch_processor(self.embedding_model, batch_size=50) new_embeddings = processor.process_with_progress(documents) # 更新文檔和嵌入 self.documents.extend(documents) self.embeddings.extend(new_embeddings) # 重建索引 self._build_index() print(f"已添加 {len(documents)} 個文檔,總計 {len(self.documents)} 個") def _build_index(self): """建立 FAISS 索引""" if not self.embeddings: return # 轉換為 numpy 陣列 embeddings_array = np.array(self.embeddings).astype('float32') # 建立 FAISS 索引 dimension = embeddings_array.shape[1] self.index = faiss.IndexFlatIP(dimension) # 內積索引 # 正規化向量(用於餘弦相似度) faiss.normalize_L2(embeddings_array) # 添加向量到索引 self.index.add(embeddings_array) print(f"FAISS 索引已建立,維度: {dimension}") def search(self, query: str, top_k: int = 5): """搜尋相關文檔""" if not self.index: return [] # 嵌入查詢 query_embedding = self.embedding_model.embed_query(query) query_vector = np.array([query_embedding]).astype('float32') # 正規化查詢向量 faiss.normalize_L2(query_vector) # 搜尋 scores, indices = self.index.search(query_vector, top_k) # 組織結果 results = [] for i, (score, idx) in enumerate(zip(scores[0], indices[0])): if idx < len(self.documents): results.append({ "rank": i + 1, "document": self.documents[idx], "similarity": float(score), "document_id": int(idx) }) return results def get_similar_documents(self, doc_id: int, top_k: int = 5): """找到與指定文檔相似的其他文檔""" if doc_id >= len(self.embeddings) or not self.index: return [] # 獲取指定文檔的嵌入 doc_embedding = np.array([self.embeddings[doc_id]]).astype('float32') faiss.normalize_L2(doc_embedding) # 搜尋(排除自己) scores, indices = 
self.index.search(doc_embedding, top_k + 1) results = [] for score, idx in zip(scores[0], indices[0]): if idx != doc_id and idx < len(self.documents): results.append({ "document": self.documents[idx], "similarity": float(score), "document_id": int(idx) }) return results[:top_k] return DocumentSearchEngine # 使用示範 def demo_document_search(): """文檔搜尋示範""" # 選擇嵌入模型 embedding_model = setup_multilingual_e5() # 或其他模型 # 創建搜尋引擎 DocumentSearchEngine = intelligent_document_search_system() search_engine = DocumentSearchEngine(embedding_model) # 範例文檔 sample_documents = [ "人工智能是模擬人類智能的技術。它包括機器學習、深度學習等方法。", "機器學習是人工智能的重要分支,讓電腦能從資料中學習規律。", "深度學習使用多層神經網路,在圖像識別和語音處理方面表現優異。", "自然語言處理專注於讓電腦理解和生成人類語言。", "電腦視覺讓機器能夠識別和理解圖像內容。", "強化學習通過獎勵和懲罰機制訓練智能代理。", "資料科學結合統計學和電腦科學來分析大量資料。", "雲端運算提供彈性和可擴展的計算資源。", "區塊鏈是一種分散式記帳技術,確保資料安全和透明。", "物聯網連接各種設備,實現智能化管理。" ] # 添加文檔 search_engine.add_documents(sample_documents) # 搜尋測試 test_queries = [ "什麼是深度學習?", "AI 人工智能應用", "數據分析方法" ] for query in test_queries: print(f"\n🔍 搜尋: {query}") results = search_engine.search(query, top_k=3) for result in results: print(f" {result['rank']}. 
相似度: {result['similarity']:.3f}") print(f" {result['document'][:60]}...") return search_engine 案例 2:多語言內容分類 def multilingual_content_classifier(): """多語言內容分類系統""" from sklearn.metrics.pairwise import cosine_similarity import numpy as np class MultilingualClassifier: def __init__(self, embedding_model): self.embedding_model = embedding_model self.categories = {} self.category_embeddings = {} def add_category(self, category_name: str, example_texts: List[str]): """添加分類類別""" # 獲取範例文字的嵌入 embeddings = self.embedding_model.embed_documents(example_texts) # 計算類別的中心向量 center_embedding = np.mean(embeddings, axis=0) self.categories[category_name] = example_texts self.category_embeddings[category_name] = center_embedding print(f"已添加類別 '{category_name}',包含 {len(example_texts)} 個範例") def classify_text(self, text: str, threshold: float = 0.5): """分類單個文字""" if not self.category_embeddings: return {"error": "沒有可用的分類類別"} # 獲取文字嵌入 text_embedding = self.embedding_model.embed_query(text) # 計算與各類別的相似度 similarities = {} for category, category_embedding in self.category_embeddings.items(): similarity = cosine_similarity( [text_embedding], [category_embedding] )[0][0] similarities[category] = similarity # 找到最佳匹配 best_category = max(similarities, key=similarities.get) best_score = similarities[best_category] # 檢查是否超過閾值 if best_score < threshold: return { "category": "未知", "confidence": best_score, "all_scores": similarities } return { "category": best_category, "confidence": best_score, "all_scores": similarities } def batch_classify(self, texts: List[str]): """批次分類多個文字""" results = [] for text in texts: result = self.classify_text(text) result["text"] = text results.append(result) return results def evaluate_classification(self, test_data: List[tuple]): """評估分類效能""" correct_predictions = 0 total_predictions = len(test_data) category_stats = {} for text, true_category in test_data: prediction = self.classify_text(text) predicted_category = prediction["category"] # 統計準確率 if 
predicted_category == true_category: correct_predictions += 1 # 分類別統計 if true_category not in category_stats: category_stats[true_category] = {"correct": 0, "total": 0} category_stats[true_category]["total"] += 1 if predicted_category == true_category: category_stats[true_category]["correct"] += 1 # 計算整體準確率 overall_accuracy = correct_predictions / total_predictions # 計算各類別準確率 category_accuracies = {} for category, stats in category_stats.items(): category_accuracies[category] = stats["correct"] / stats["total"] return { "overall_accuracy": overall_accuracy, "category_accuracies": category_accuracies, "total_samples": total_predictions, "correct_predictions": correct_predictions } return MultilingualClassifier # 使用示範 def demo_multilingual_classification(): """多語言分類示範""" # 創建分類器 embedding_model = setup_multilingual_e5() MultilingualClassifier = multilingual_content_classifier() classifier = MultilingualClassifier(embedding_model) # 定義分類類別和範例 categories_data = { "科技": [ "人工智能正在改變世界", "Machine learning algorithms are improving", "新的程式語言框架發布", "Quantum computing breakthrough announced", "區塊鏈技術的最新應用" ], "體育": [ "足球比賽結果公佈", "Basketball championship finals", "奧運會準備工作進行中", "Tennis tournament highlights", "游泳世界紀錄被打破" ], "美食": [ "新餐廳開業優惠活動", "Traditional cooking recipes shared", "健康飲食的重要性", "Food festival this weekend", "素食料理製作方法" ] } # 添加類別 for category, examples in categories_data.items(): classifier.add_category(category, examples) # 測試分類 test_texts = [ "深度學習模型的新突破", # 科技 "Basketball player wins MVP award", # 體育 "義大利麵的製作秘訣", # 美食 "這是一個混合的句子,包含 AI 和 pizza" # 混合 ] print("\n🏷️ 分類測試:") for text in test_texts: result = classifier.classify_text(text) print(f"\n文字: {text}") print(f"分類: {result['category']}") print(f"信心度: {result['confidence']:.3f}") return classifier 案例 3:語意相似度分析 def semantic_similarity_analyzer(): """語意相似度分析工具""" import matplotlib.pyplot as plt import seaborn as sns import pandas as pd class SimilarityAnalyzer: def __init__(self, embedding_model): 
self.embedding_model = embedding_model def analyze_text_groups(self, text_groups: dict): """分析文字群組間的相似度""" all_texts = [] group_labels = [] # 收集所有文字和標籤 for group_name, texts in text_groups.items(): all_texts.extend(texts) group_labels.extend([group_name] * len(texts)) # 獲取所有嵌入 embeddings = self.embedding_model.embed_documents(all_texts) # 計算相似度矩陣 similarity_matrix = cosine_similarity(embeddings) # 創建 DataFrame df = pd.DataFrame( similarity_matrix, index=[f"{label}_{i}" for i, label in enumerate(group_labels)], columns=[f"{label}_{i}" for i, label in enumerate(group_labels)] ) return df, all_texts, group_labels def visualize_similarity_heatmap(self, similarity_df, title="語意相似度熱圖"): """視覺化相似度熱圖""" plt.figure(figsize=(12, 10)) sns.heatmap( similarity_df, annot=True, cmap="YlOrRd", center=0, square=True, fmt='.2f' ) plt.title(title) plt.tight_layout() plt.show() def find_outliers(self, texts: List[str], threshold: float = 0.3): """找出語意上的異常值""" embeddings = self.embedding_model.embed_documents(texts) similarity_matrix = cosine_similarity(embeddings) # 計算每個文字與其他文字的平均相似度 avg_similarities = [] for i in range(len(texts)): # 排除自己 others = [similarity_matrix[i][j] for j in range(len(texts)) if i != j] avg_similarity = np.mean(others) avg_similarities.append(avg_similarity) # 找出異常值 outliers = [] for i, avg_sim in enumerate(avg_similarities): if avg_sim < threshold: outliers.append({ "text": texts[i], "avg_similarity": avg_sim, "index": i }) return sorted(outliers, key=lambda x: x["avg_similarity"]) def cluster_similar_texts(self, texts: List[str], n_clusters: int = 3): """將相似文字聚類""" from sklearn.cluster import KMeans embeddings = self.embedding_model.embed_documents(texts) # K-means 聚類 kmeans = KMeans(n_clusters=n_clusters, random_state=42) cluster_labels = kmeans.fit_predict(embeddings) # 組織結果 clusters = {} for i, label in enumerate(cluster_labels): if label not in clusters: clusters[label] = [] clusters[label].append({ "text": texts[i], "index": i }) return clusters return 
SimilarityAnalyzer # 使用示範 def demo_similarity_analysis(): """相似度分析示範""" # 創建分析器 embedding_model = setup_multilingual_e5() SimilarityAnalyzer = semantic_similarity_analyzer() analyzer = SimilarityAnalyzer(embedding_model) # 測試文字群組 text_groups = { "AI技術": [ "機器學習幫助解決複雜問題", "深度學習在圖像識別表現優異", "自然語言處理理解人類語言", "人工智能改變各個行業" ], "程式設計": [ "Python 是流行的程式語言", "JavaScript 用於網頁開發", "演算法優化提升程式效能", "開源軟體促進技術發展" ], "不相關": [ "今天天氣很好", "我喜歡吃義大利麵", "電影很精彩" ] } # 分析群組相似度 similarity_df, all_texts, group_labels = analyzer.analyze_text_groups(text_groups) print("📊 群組間相似度分析完成") print(f"分析了 {len(all_texts)} 個文字,分為 {len(text_groups)} 個群組") # 找出異常值 outliers = analyzer.find_outliers(all_texts, threshold=0.4) if outliers: print(f"\n🔍 發現 {len(outliers)} 個語意異常值:") for outlier in outliers: print(f" • {outlier['text']} (相似度: {outlier['avg_similarity']:.3f})") # 自動聚類 clusters = analyzer.cluster_similar_texts(all_texts, n_clusters=3) print(f"\n🔗 自動聚類結果 ({len(clusters)} 個群組):") for cluster_id, cluster_texts in clusters.items(): print(f"\n 群組 {cluster_id}:") for item in cluster_texts: print(f" • {item['text']}") return analyzer, similarity_df 總結與最佳實踐 🎯 嵌入模型選擇指南 def embedding_selection_guide(): """嵌入模型選擇指南""" selection_matrix = { "商業場景": { "高品質需求": "OpenAI text-embedding-3-large", "成本敏感": "OpenAI text-embedding-3-small", "企業部署": "Azure OpenAI Embeddings" }, "開源方案": { "多語言支援": "multilingual-e5-large-instruct", "中文優化": "bge-large-zh-v1.5", "輕量級": "all-MiniLM-L6-v2" }, "本地部署": { "隱私優先": "Ollama + nomic-embed-text", "資源限制": "GPT4All embeddings", "自定義": "本地 Hugging Face 模型" }, "特殊需求": { "多模態": "OpenCLIP", "程式碼": "code-specific models", "領域特化": "fine-tuned models" } } return selection_matrix # 效能對比工具 def comprehensive_model_comparison(): """全面的模型比較工具""" models_to_test = { "OpenAI-Large": OpenAIEmbeddings(model="text-embedding-3-large"), "OpenAI-Small": OpenAIEmbeddings(model="text-embedding-3-small"), "E5-Multilingual": setup_multilingual_e5(), "BGE-Chinese": setup_bge_chinese() } test_scenarios = { "中文文字": 
["人工智能改變世界", "機器學習很重要"], "英文文字": ["AI is transforming industries", "Machine learning is crucial"], "混合語言": ["AI人工智能", "Machine learning機器學習"], "技術術語": ["深度學習神經網路", "自然語言處理NLP"] } results = {} for model_name, model in models_to_test.items(): results[model_name] = {} for scenario_name, texts in test_scenarios.items(): try: start_time = time.time() embeddings = model.embed_documents(texts) end_time = time.time() results[model_name][scenario_name] = { "success": True, "time": end_time - start_time, "dimension": len(embeddings[0]), "avg_processing_time": (end_time - start_time) / len(texts) } except Exception as e: results[model_name][scenario_name] = { "success": False, "error": str(e) } return results 💡 最佳實踐建議 模型選擇 考慮成本、效能、隱私三大因素 先用小模型驗證,再升級到大模型 多語言場景優選多語言專用模型 效能最佳化 實作嵌入快取減少重複計算 使用批次處理提升吞吐量 監控記憶體使用避免 OOM 生產部署 本地部署考慮硬體資源 雲端服務注意 API 限制 實作降級和重試機制 品質保證 定期評估嵌入品質 監控相似度分佈 根據業務場景調整模型 嵌入模型是 RAG 系統的核心組件,正確的選擇和最佳化能顯著提升系統效能。記住,最好的模型是最適合你需求的模型,而不一定是最新或最大的模型! 本指南基於 LangChain 官方文檔和實際專案經驗編寫,持續更新以反映最新的技術發展和最佳實踐。 (註:以下片段為 calculate_openai_cost 的回傳值,原文排版時錯置於此) return { "total_tokens": total_tokens, "estimated_cost": cost, "cost_per_1k_tokens": prices[model] * 1000 } ### 其他商業服務 ```python # Cohere Embeddings from langchain_cohere import CohereEmbeddings cohere_embeddings = CohereEmbeddings( model="embed-multilingual-v3.0", # 支援多語言 cohere_api_key=os.getenv("COHERE_API_KEY") ) # Azure OpenAI from langchain_openai import AzureOpenAIEmbeddings azure_embeddings = AzureOpenAIEmbeddings( azure_deployment="your-embedding-deployment", openai_api_version="2023-05-15", azure_endpoint="https://your-resource.openai.azure.com/", api_key=os.getenv("AZURE_OPENAI_API_KEY") ) 開源嵌入模型 Hugging Face 模型 最豐富的開源模型生態系統: from langchain_huggingface import HuggingFaceEmbeddings import torch # 檢查可用設備 device = "cuda" if torch.cuda.is_available() else "cpu" print(f"使用設備: {device}") # 多語言 E5 模型(推薦) def setup_multilingual_e5(): """設置多語言 E5 模型""" model_name = "intfloat/multilingual-e5-large-instruct" embeddings = HuggingFaceEmbeddings( model_name=model_name, model_kwargs={
"device": device, "trust_remote_code": True }, encode_kwargs={ "normalize_embeddings": True, # 正規化向量 "show_progress_bar": True # 顯示進度條 } ) return embeddings # BGE 中文模型 def setup_bge_chinese(): """設置 BGE 中文模型""" model_name = "BAAI/bge-large-zh-v1.5" embeddings = HuggingFaceEmbeddings( model_name=model_name, model_kwargs={"device": device}, encode_kwargs={"normalize_embeddings": True} ) return embeddings # 輕量級模型(適合 CPU) def setup_lightweight_model(): """設置輕量級模型""" model_name = "sentence-transformers/all-MiniLM-L6-v2" embeddings = HuggingFaceEmbeddings( model_name=model_name, model_kwargs={"device": "cpu"}, encode_kwargs={"normalize_embeddings": True} ) return embeddings # 模型效能測試 def benchmark_models(): """比較不同模型的效能""" import time models = { "E5-Large": setup_multilingual_e5(), "BGE-Chinese": setup_bge_chinese(), "MiniLM": setup_lightweight_model() } test_texts = [ "人工智能是未來科技的重要方向。", "Machine learning is transforming industries.", "自然語言處理讓機器理解人類語言。" ] results = {} for model_name, model in models.items(): start_time = time.time() try: vectors = model.embed_documents(test_texts) end_time = time.time() results[model_name] = { "success": True, "time": end_time - start_time, "dimension": len(vectors[0]), "vectors_count": len(vectors) } except Exception as e: results[model_name] = { "success": False, "error": str(e) } return results 模型下載與管理 def manage_huggingface_models(): """管理 Hugging Face 模型下載和快取""" from huggingface_hub import snapshot_download import os # 設置快取目錄 cache_dir = "./models_cache" os.makedirs(cache_dir, exist_ok=True) # 預下載模型 models_to_download = [ "intfloat/multilingual-e5-large-instruct", "BAAI/bge-large-zh-v1.5", "sentence-transformers/all-MiniLM-L6-v2" ] for model_name in models_to_download: print(f"下載模型: {model_name}") try: snapshot_download( repo_id=model_name, cache_dir=cache_dir, resume_download=True ) print(f"✅ {model_name} 下載完成") except Exception as e: print(f"❌ {model_name} 下載失敗: {e}") return cache_dir 本地部署方案 Ollama 本地部署 簡單易用的本地模型運行工具: from 
langchain_ollama import OllamaEmbeddings # 基本設置 def setup_ollama_embeddings(): """設置 Ollama 嵌入模型""" # 確保已安裝 Ollama 並拉取模型 # 命令: ollama pull nomic-embed-text embeddings = OllamaEmbeddings( model="nomic-embed-text", base_url="http://localhost:11434" # 預設 Ollama 地址 ) return embeddings # 檢查 Ollama 服務狀態 def check_ollama_status(): """檢查 Ollama 服務狀態""" import requests try: response = requests.get("http://localhost:11434/api/tags") if response.status_code == 200: models = response.json().get("models", []) return { "status": "running", "available_models": [model["name"] for model in models] } else: return {"status": "error", "message": "Service not responding"} except requests.ConnectionError: return {"status": "offline", "message": "Ollama not running"} # 自動設置 Ollama def auto_setup_ollama(): """自動設置 Ollama 環境""" import subprocess import time # 檢查狀態 status = check_ollama_status() if status["status"] == "offline": print("Ollama 未運行,請先啟動 Ollama 服務") return None # 檢查是否有嵌入模型 embedding_models = ["nomic-embed-text", "all-minilm"] available_models = status.get("available_models", []) for model in embedding_models: if model not in available_models: print(f"拉取模型: {model}") try: subprocess.run(["ollama", "pull", model], check=True) print(f"✅ {model} 安裝完成") break except subprocess.CalledProcessError: print(f"❌ {model} 安裝失敗") continue return OllamaEmbeddings(model=model) GPT4All 本地方案 from langchain_community.embeddings import GPT4AllEmbeddings def setup_gpt4all_embeddings(): """設置 GPT4All 嵌入模型""" embeddings = GPT4AllEmbeddings( model_name="all-MiniLM-L6-v2.gguf2.f16.gguf", gpt4all_kwargs={"allow_download": True} ) return embeddings # 效能測試 def test_local_performance(): """測試本地模型效能""" import time import psutil import os # 測試文字 test_docs = [ "人工智能正在改變世界。", "機器學習是AI的重要分支。", "深度學習模擬人腦神經網路。", "自然語言處理理解人類語言。", "電腦視覺讓機器看懂圖像。" ] * 20 # 重複以測試批次處理 models = { "Ollama": setup_ollama_embeddings(), "GPT4All": setup_gpt4all_embeddings() } results = {} for model_name, model in models.items(): print(f"測試 
{model_name}...") # 記錄系統資源 process = psutil.Process(os.getpid()) start_memory = process.memory_info().rss / 1024 / 1024 # MB start_time = time.time() try: vectors = model.embed_documents(test_docs) end_time = time.time() end_memory = process.memory_info().rss / 1024 / 1024 # MB results[model_name] = { "success": True, "processing_time": end_time - start_time, "memory_usage": end_memory - start_memory, "throughput": len(test_docs) / (end_time - start_time), "dimension": len(vectors[0]) if vectors else 0 } except Exception as e: results[model_name] = { "success": False, "error": str(e) } return results 多模態嵌入 CLIP 模型應用 處理文字和圖像的統一嵌入空間: # 安裝 OpenCLIP: pip install open_clip_torch from langchain_experimental.open_clip import OpenCLIPEmbeddings import torch def setup_clip_embeddings(): """設置 CLIP 多模態嵌入""" clip_embeddings = OpenCLIPEmbeddings( model_name="ViT-B-32", checkpoint="openai" ) return clip_embeddings def multimodal_search_demo(): """多模態搜尋示範""" clip_model = setup_clip_embeddings() # 準備圖像路徑 image_paths = [ "./images/cat.jpg", "./images/dog.jpg", "./images/car.jpg", "./images/tree.jpg" ] # 準備搜尋查詢 text_queries = [ "可愛的寵物", "交通工具", "自然風景" ] try: # 獲取圖像嵌入 image_embeddings = clip_model.embed_image(image_paths) # 獲取文字嵌入 text_embeddings = clip_model.embed_documents(text_queries) # 計算相似度矩陣 similarities = [] for text_emb in text_embeddings: row = [] for img_emb in image_embeddings: similarity = calculate_similarity(text_emb, img_emb) row.append(similarity) similarities.append(row) # 找到最匹配的圖像 results = {} for i, query in enumerate(text_queries): best_match_idx = similarities[i].index(max(similarities[i])) results[query] = { "best_image": image_paths[best_match_idx], "similarity": similarities[i][best_match_idx] } return results except Exception as e: print(f"多模態搜尋失敗: {e}") return None # 自定義多模態應用 def custom_multimodal_app(): """自定義多模態應用""" def search_images_by_text(text_query, image_database): """根據文字搜尋圖像""" clip_model = setup_clip_embeddings() # 獲取查詢嵌入 query_embedding = 
clip_model.embed_query(text_query) # 獲取所有圖像嵌入 image_embeddings = clip_model.embed_image(image_database) # 計算相似度並排序 similarities = [] for i, img_emb in enumerate(image_embeddings): similarity = calculate_similarity(query_embedding, img_emb) similarities.append((i, similarity)) # 按相似度排序 similarities.sort(key=lambda x: x[1], reverse=True) return similarities[:5] # 返回前5個最相似的 def search_text_by_image(image_path, text_database): """根據圖像搜尋文字""" clip_model = setup_clip_embeddings() # 獲取圖像嵌入 image_embedding = clip_model.embed_image([image_path])[0] # 獲取文字嵌入 text_embeddings = clip_model.embed_documents(text_database) # 計算相似度 similarities = [] for i, text_emb in enumerate(text_embeddings): similarity = calculate_similarity(image_embedding, text_emb) similarities.append((i, similarity)) similarities.sort(key=lambda x: x[1], reverse=True) return similarities[:5] return search_images_by_text, search_text_by_image 效能最佳化 嵌入快取系統 ```python
from langchain.embeddings import CacheBackedEmbeddings
from langchain.storage import LocalFileStore, InMemoryStore
import hashlib
import json
class AdvancedEmbeddingCache: """進階嵌入快取系統""" def __init__(self, cache_type="file", cache_dir="./embedding_cache"): self.cache_type = cache_type self.cache_dir = cache_dir if cache_type == "file": self.store = LocalFileStore(cache_dir) elif cache_type == "memory": self.store = InMemoryStore() else: raise ValueError("cache_type must be 'file' or 'memory'") def create_cached_embeddings(self, base_embeddings, namespace=None): """創建帶快取的嵌入模型""" namespace = namespace or base_embeddings.model return CacheBackedEmbeddings.from_bytes_store( base_embeddings, self.store, namespace=namespace ) def get_cache_stats(self): """獲取快取統計""" if self.cache_type == "file": import os cache_files = [] for root, dirs, files in os.walk(self.cache_dir): for file in files: file_path = os.path.join(root, file) cache_files.append({ "file": file, "size": os.path.getsize(file_path), "modified": os.path.getmtime(file_path) }) total_size = 
sum(f["size"] for f in cache_files) return { "cache_type": "file", "total_files": len(cache_files), "total_size_mb": total_size / (1024 * 1024), "cache_dir": self.cache_dir } else: # 記憶體快取統計 return { "cache_type": "memory", "entries": len(self.store.store) if hasattr(self.store, 'store') else 0 } def clear_cache(self, older_than_days=None): """清理快取""" if self.cache_type == "file" and older_than_days: import os import time current_time = time.time() cutoff_time = current_time - (older_than_days * 24 * 3600) removed_count = 0 for root, dirs, files in os.walk(self.cache_dir): for file in files: file_path = os.path.join(root, file) if os.path.getmtime(file_path) < cutoff_time: os.remove(file_path) removed_count += 1 return {"removed_files": removed_count} elif self.cache_type == "memory": if hasattr(self.store, 'store'): self.store.store.clear() return {"status": "memory_cache_cleared"} 快取使用示範 def caching_performance_demo(): """快取效能示範""" import time from langchain_openai import OpenAIEmbeddings # 創建快取系統 cache_system = AdvancedEmbeddingCache(cache_type="file") # 原始嵌入模型 base_embeddings = OpenAIEmbeddings(model="text-embedding-3-small") # 帶快取的嵌入模型 cached_embeddings = cache_system.create_cached_embeddings(base_embeddings) # 測試文檔 test_docs = [ "人工智能是現代科技的重要分支。", "機器學習幫助電腦從資料中學習。", "深度學習使用多層神經網路。", "自然語言處理讓機器理解語言。" ] * 10 # 重複文檔來測試快取效果 # 第一次運行(建立快取) print("第一次運行(建立快取)...") start_time = time.time() embeddings_1 = cached_embeddings.embed_documents(test_docs) time_1 = time.time() - start_time print(f"時間: {time_1:.2f} 秒") # 第二次運行(使用快取) print("第二次運行(使用快取)...") start_time = time.time() embeddings_2 = cached_embeddings.embed_documents(test_docs) time_2 = time.time() - start_time print(f"時間: {time_2:.2f} 秒") # 計算速度提升 speedup = time_1 / time_2 if time_2 > 0 else float('inf') print(f"速度提升: {speedup:.1f}x") # 快取統計 stats = cache_system.get_cache_stats() print(f"快取統計: {stats}") return {