import streamlit as st
from transformers import AutoTokenizer, AutoModelForSequenceClassification, pipeline
import torch
@st.cache_resource
def load_model():
    model_name = "finiteautomata/bertweet-base-sentiment-analysis"
    cache_dir = "/app/hf_cache"  # Use the cache directory set in the Dockerfile
    tokenizer = AutoTokenizer.from_pretrained(model_name, cache_dir=cache_dir)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, cache_dir=cache_dir)
    return pipeline("sentiment-analysis", model=model, tokenizer=tokenizer)
# Load model
sentiment_pipeline = load_model()
# Streamlit UI
st.title("📝 Text Sentiment Analyzer")
st.write("Enter your text and click **Analyze** to see if it's Positive, Neutral, or Negative.")
# Text input
user_input = st.text_area("Enter text here:", height=150)
# Analyze button
if st.button("Analyze"):
    if not user_input.strip():
        st.warning("Please enter some text.")
    else:
        result = sentiment_pipeline(user_input)[0]
        # The model returns POS, NEU, or NEG; map them to readable names
        label = result["label"]
        if label == "NEG":
            sentiment = "Negative"
        elif label == "NEU":
            sentiment = "Neutral"
        elif label == "POS":
            sentiment = "Positive"
        else:
            sentiment = "Unknown"
        st.success(f"**Sentiment:** {sentiment}")