from transformers import AutoModelForCausalLM, AutoTokenizer
import torch

# from_pretrained expects the model directory (or a hub ID), not the
# ProTalkModel.safetensors file itself; the weights are picked up from the
# directory that also holds config.json and the tokenizer files.
model_name = "./"
device = "cuda" if torch.cuda.is_available() else "cpu"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name, torch_dtype=torch.float16 if device == "cuda" else torch.float32
).to(device)
system_prompt = "You are ProTalk, a professional AI assistant. Remember everything in this conversation. Be polite, witty, and professional."
chat_history = []
while True:
    user_input = input("User: ")
    if user_input.lower() == "exit":
        break
    chat_history.append(f"User: {user_input}")

    # Rebuild the prompt from the system prompt plus every turn so far.
    prompt = system_prompt + "\n" + "\n".join(chat_history) + "\nProTalk:"
    inputs = tokenizer(prompt, return_tensors="pt").to(device)
    outputs = model.generate(
        **inputs, max_new_tokens=150, do_sample=True, temperature=0.7, top_p=0.9
    )

    # outputs[0] contains the prompt tokens as well; decode only the new ones.
    response = tokenizer.decode(
        outputs[0][inputs["input_ids"].shape[-1]:], skip_special_tokens=True
    )
    print(f"ProTalk: {response}")
    chat_history.append(f"ProTalk: {response}")