ak = akasha.Doc_QA()
ak.get_response(dir_path, prompt, model="anthropic:claude-3-5-sonnet-20241022")
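Hosted models read their credentials from environment variables, so the matching key has to be set before the call. A minimal sketch; the key value below is a placeholder:

import os

# claude models require ANTHROPIC_API_KEY (placeholder value shown)
os.environ["ANTHROPIC_API_KEY"] = "sk-ant-..."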
Available models
openai_model = "openai:gpt-3.5-turbo"   # needs environment variable "OPENAI_API_KEY"
gemini_model = "gemini:gemini-1.5-flash"   # needs environment variable "GEMINI_API_KEY"
anthropic_model = "anthropic:claude-3-5-sonnet-20241022"   # needs environment variable "ANTHROPIC_API_KEY"
huggingface_model = "hf:meta-llama/Llama-2-7b-chat-hf"   # needs environment variable "HUGGINGFACEHUB_API_TOKEN" to download the meta-llama model
quantized_ch_llama_model = "hf:FlagAlpha/Llama2-Chinese-13b-Chat-4bit"
taiwan_llama_gptq = "hf:weiren119/Taiwan-LLaMa-v1.0-4bits-GPTQ"
mistral = "hf:Mistral-7B-Instruct-v0.2"
mediatek_Breeze = "hf:MediaTek-Research/Breeze-7B-Instruct-64k-v0.1"

### If you want to use llama-cpp to run a model on cpu, you can download gguf versions of models,
### e.g. from https://huggingface.co/TheBloke/Llama-2-7b-Chat-GGUF and
### https://huggingface.co/TheBloke/CodeUp-Llama-2-13B-Chat-HF-GGUF;
### the name behind "llama-gpu:" or "llama-cpu:" is the path of the downloaded .gguf file
llama_cpp_model = "llama-gpu:model/llama-2-13b-chat-hf.Q5_K_S.gguf"
llama_cpp_model = "llama-cpu:model/llama-2-7b-chat.Q5_K_S.gguf"
llama_cpp_chinese_alpaca = "llama-gpu:model/chinese-alpaca-2-7b.Q5_K_S.gguf"
llama_cpp_chinese_alpaca = "llama-cpu:model/chinese-alpaca-2-13b.Q5_K_M.gguf"
chatglm_model = "chatglm:THUDM/chatglm2-6b"
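Any of the identifiers above can be passed straight to the model argument, so switching between a hosted model and a local gguf file only changes the string. A minimal sketch, assuming dir_path and prompt are the same variables used earlier:

import akasha

ak = akasha.Doc_QA()

# hosted model: needs GEMINI_API_KEY in the environment
ak.get_response(dir_path, prompt, model="gemini:gemini-1.5-flash")

# local gguf model run through llama-cpp on cpu
ak.get_response(dir_path, prompt, model="llama-cpu:model/llama-2-7b-chat.Q5_K_S.gguf")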
def test_model(prompt: str):
    # send a single prompt to gpt-3.5-turbo and return the text reply
    import openai
    from langchain.chat_models import ChatOpenAI

    openai.api_type = "open_ai"
    model = ChatOpenAI(model="gpt-3.5-turbo", temperature=0)
    ret = model.predict(prompt)
    return ret
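A quick usage check, assuming OPENAI_API_KEY is already exported; the prompt text is arbitrary:

print(test_model("Explain retrieval-augmented generation in one sentence."))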
model_obj = akasha.helper.handle_model("openai:gpt-3.5-turbo", False, 0.0)

# this prompt asks the LLM to respond 'yes' or 'no' depending on whether the
# document segment is relevant to the user question
SYSTEM_PROMPT = akasha.prompts.default_doc_grader_prompt()
documents = ["Doc1...", "Doc2...", "Doc3...", "Doc4..."]
question = "五軸是什麼?"  # "What is five-axis machining?"
prompts = ["document: " + doc + "\n\n" + "User Question: " + question for doc in documents]
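Each combined prompt can then be graded by the model together with the system prompt. The call_model helper used below is my assumption about akasha's interface; verify the name and signature against your akasha version:

# grade each document; keep only those the model judges relevant
# akasha.helper.call_model(model_obj, text) is assumed here
relevant_docs = []
for doc, p in zip(documents, prompts):
    answer = akasha.helper.call_model(model_obj, SYSTEM_PROMPT + "\n\n" + p)
    if "yes" in answer.lower():
        relevant_docs.append(doc)

print(relevant_docs)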