File size: 4,942 Bytes
88a3d14
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
from openai import OpenAI
from mistralai import Mistral
from llamaapi import LlamaAPI
import streamlit as st
from IPython.display import display, Math

# Sidebar: model picker, API-key entry, and conversation reset.
with st.sidebar:
    option = st.selectbox(
        'Please select your model',
        ('o3-mini','o3','GPT-5','GPT-4o','GPT-4o-mini','GPT-4.1','o4-mini','Mixtral 8x7B','Mixtral 8x22B', 'Mistral Large 2','Mistral NeMo',
         'Llama-3.1-405B','Llama-3.2-3B','Llama-3.3-70B'))
    st.write('You selected:', option)

    # API Key input (stored under a fixed session key; masked in the UI)
    api_key = st.text_input("Please Copy & Paste your API_KEY", key="chatbot_api_key", type="password")

    # Reset button.
    # BUGFIX: previously this set messages to [], but the initializer below
    # only seeds the greeting when 'messages' is *absent* from session_state,
    # not when it is empty — so after a reset the chat stayed blank forever.
    # Restore the same initial greeting the app starts with instead.
    if st.button('Reset Conversation'):
        st.session_state["messages"] = [{"role": "assistant", "content": "How can I help you?"}]
        st.info("Please change your API_KEY if you change model.")

# Page header
st.title("💬 AI Chatbot")
st.caption("🚀 Your Personal AI Assistant powered by Streamlit and LLMs")

# Seed the conversation with a greeting on the very first run of a session.
if "messages" not in st.session_state:
    st.session_state["messages"] = [
        {"role": "assistant", "content": "How can I help you?"},
    ]

# Re-render the full conversation history on every rerun.
for message in st.session_state["messages"]:
    st.chat_message(message["role"]).write(message["content"])


# Chat input: on each user prompt, route to the selected provider/model,
# append both sides of the exchange to session state, and render the reply.
if prompt := st.chat_input():
    if not api_key:
        st.info("Please add your API_KEY to go ahead.")
        st.stop()

    # Record and echo the user's message.
    st.session_state.messages.append({"role": "user", "content": prompt})
    st.chat_message("user").write(prompt)

    # Dispatch table: UI label -> (provider, API model id).
    # One table instead of a 14-branch if/elif chain makes label/model
    # mismatches obvious at a glance.
    MODEL_DISPATCH = {
        # Mistral-hosted models (Mistral SDK)
        'Mixtral 8x7B':    ("mistral", "open-mixtral-8x7b"),
        'Mixtral 8x22B':   ("mistral", "open-mixtral-8x22b"),
        'Mistral Large 2': ("mistral", "mistral-large-2407"),
        # NOTE(review): 'Mathstral' is not offered by the sidebar selectbox,
        # so this entry is currently unreachable — kept for parity with the
        # original code; confirm whether it should be added to the selectbox.
        'Mathstral':       ("mistral", "mistralai/mathstral-7B-v0.1"),
        'Mistral NeMo':    ("mistral", "open-mistral-nemo-2407"),
        # OpenAI models (OpenAI SDK)
        'o3-mini':     ("openai", "o3-mini-2025-01-31"),
        'o3':          ("openai", "o3"),
        'GPT-5':       ("openai", "gpt-5-2025-08-07"),
        'GPT-4o':      ("openai", "gpt-4o"),
        'GPT-4o-mini': ("openai", "gpt-4o-mini"),
        # BUGFIX: 'GPT-4.1' previously called "gpt-4-turbo-2024-04-09"
        # (a GPT-4 Turbo snapshot) and 'o4-mini' called "gpt-3.5-turbo";
        # both now use the model ids matching their labels.
        'GPT-4.1':     ("openai", "gpt-4.1"),
        'o4-mini':     ("openai", "o4-mini"),
        # Llama models via the Llama-API OpenAI-compatible endpoint
        'Llama-3.1-405B': ("llama", "llama3.1-405b"),
        'Llama-3.2-3B':   ("llama", "llama3.2-3b"),
        'Llama-3.3-70B':  ("llama", "llama3.3-70b"),
    }

    if option not in MODEL_DISPATCH:
        st.error("Selected model is not supported.")
        st.stop()

    provider, model_id = MODEL_DISPATCH[option]
    if provider == "mistral":
        client = Mistral(api_key=api_key)
        response = client.chat.complete(model=model_id, messages=st.session_state.messages)
    elif provider == "llama":
        # Llama-API speaks the OpenAI wire protocol, so the OpenAI client is
        # reused with a custom base_url; max_tokens kept from the original.
        client = OpenAI(api_key=api_key, base_url="https://api.llama-api.com")
        response = client.chat.completions.create(
            model=model_id, messages=st.session_state.messages, max_tokens=1000)
    else:  # provider == "openai"
        client = OpenAI(api_key=api_key)
        response = client.chat.completions.create(model=model_id, messages=st.session_state.messages)

    # Both SDKs expose the reply text at choices[0].message.content.
    msg = response.choices[0].message.content
    st.session_state.messages.append({"role": "assistant", "content": msg})
    st.chat_message("assistant").write(msg)