from turtledemo.penrose import start
from langchain.memory import ConversationBufferMemory
from langchain_core.messages import SystemMessage
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate, HumanMessagePromptTemplate, MessagesPlaceholder
from langchain_core.runnables import RunnablePassthrough
from langchain_ollama import ChatOllama
# Prompt style 1: build the prompt from explicit message objects
# (SystemMessage + HumanMessagePromptTemplate).
template = ChatPromptTemplate.from_messages([
    SystemMessage(content="你是一位專業(yè)的Java開發(fā)工程師,擅長解決各種java相關(guān)問題"),
    HumanMessagePromptTemplate.from_template("我的問題:{text}"),
])
# Fill the {text} slot to produce the concrete message list.
message = template.format_messages(text="java的三大特性是什么?")
llm_model_ollama = ChatOllama(model="qwen2.5:3b")
# NOTE(review): result is computed but never printed/used — presumably this
# section is a demo step superseded by the sections below; confirm intent.
result = llm_model_ollama.invoke(message)
# Prompt style 2: build the prompt from (role, template-string) tuples.
_role_templates = [
    ("system", "你是一位專業(yè)的陪聊,名字叫{name},擅長陪用戶聊天"),
    ("human", "{question}"),
]
template2 = ChatPromptTemplate.from_messages(_role_templates)
question = input("請輸入您的問題")
message2 = template2.format_messages(name="小洋", question=question)
llm_model = ChatOllama(model="qwen2.5:3b")
# Output parser: instead of the full AI message (token usage, metadata, ...),
# keep only the text content.
strOutParser = StrOutputParser()
chain = llm_model | strOutParser
# Final execution intentionally commented out:
# result = chain.stream(message2)
# for chunk in result:
#     print(chunk, end="", flush=True)
# Advanced example: a continuous, multi-turn chat bot with memory.
def aiChat():
    """Run an interactive chat loop that remembers previous turns.

    Each iteration reads a question from stdin, streams the model's
    answer to stdout, and saves the turn into memory so the next
    prompt includes the full conversation history.

    Runs until interrupted (Ctrl-C); returns nothing.
    """
    llm_model_qwen = ChatOllama(model="qwen3:4b")
    # return_messages=True makes the memory hand back message objects,
    # which is the format MessagesPlaceholder expects (the original
    # default returned a single string and would not fit the placeholder).
    memory = ConversationBufferMemory(return_messages=True)
    # Build the prompt ONCE, outside the loop. The history placeholder
    # must come BEFORE the new human turn so the model sees the
    # conversation in chronological order.
    prompt = ChatPromptTemplate.from_messages([
        SystemMessage(content="你是一位心理咨詢師,可以給用戶提供各種情緒價值"),
        MessagesPlaceholder(variable_name="chat_history"),  # history slot
        HumanMessagePromptTemplate.from_template("{input}"),
    ])
    # Memory is NOT a Runnable and cannot be piped; the chain is just
    # model -> string parser. History is injected via format_messages.
    chain = llm_model_qwen | StrOutputParser()
    index = 1
    while True:
        if index == 1:
            input_message = input("很高興與您對話,請?zhí)岢瞿膯栴}吧:")
        else:
            input_message = input("請繼續(xù)提出您問題:")
        # Load accumulated history and fill both prompt variables
        # (the original omitted chat_history, raising KeyError).
        history = memory.load_memory_variables({})["history"]
        message = prompt.format_messages(input=input_message, chat_history=history)
        # Stream the answer while collecting it so it can be memorized.
        response_parts = []
        for chunk in chain.stream(message):
            print(chunk, end="", flush=True)
            response_parts.append(chunk)
        print()  # newline after the streamed answer
        # Persist this turn; the next iteration's chat_history includes it.
        memory.save_context({"input": input_message}, {"output": "".join(response_parts)})
        index += 1
# Script entry point: start the interactive chat loop.
if __name__ == '__main__':
    aiChat()