mohamedachraf committed on
Commit 63e2e46 · 1 Parent(s): 6b53ad3

Add application file

Files changed (1): app.py (+182, -0)
app.py ADDED
@@ -0,0 +1,182 @@
+ import gradio as gr
+
+ from langchain.text_splitter import CharacterTextSplitter
+ from langchain_community.document_loaders import UnstructuredFileLoader
+ from langchain.vectorstores.faiss import FAISS
+ from langchain.vectorstores.utils import DistanceStrategy
+ from langchain_community.embeddings import HuggingFaceEmbeddings
+
+ from langchain.chains import RetrievalQA
+ from langchain.prompts.prompt import PromptTemplate
+ from langchain.vectorstores.base import VectorStoreRetriever
+
+ import torch
+ from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
+ from langchain_community.llms.huggingface_pipeline import HuggingFacePipeline
+
+ from transformers import TextIteratorStreamer
+ from threading import Thread
+
+ # Prompt template
+ template = """Instruction:
+ You are an AI assistant for answering questions about the provided context.
+ You are given the following extracted parts of a long document and a question. Provide a detailed answer.
+ If you don't know the answer, just say "Hmm, I'm not sure." Don't try to make up an answer.
+ =======
+ {context}
+ =======
+ Question: {question}
+ Output:\n"""
+
+ QA_PROMPT = PromptTemplate(template=template, input_variables=["question", "context"])
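+ # RetrievalQA's default "stuff" chain fills {context} with the retrieved
+ # chunks and {question} with the user's query before calling the model.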
+
+ # Load the Phi-2 model from the Hugging Face Hub
+ model_id = "microsoft/phi-2"
+
+ tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)
+ model = AutoModelForCausalLM.from_pretrained(
+     model_id, torch_dtype=torch.float32, device_map="auto", trust_remote_code=True
+ )
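+ # torch.float32 is the safe choice for CPU-only hardware; on a GPU,
+ # torch.float16 would roughly halve the model's memory footprint.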
+
+ # Sentence-transformers model used to embed documents for the vector store
+ embeddings = HuggingFaceEmbeddings(
+     model_name="sentence-transformers/msmarco-distilbert-base-v4",
+     model_kwargs={"device": "cpu"},
+     encode_kwargs={"normalize_embeddings": False},
+ )
+
+
+ # Returns a FAISS vector store retriever for a given txt file
+ def prepare_vector_store_retriever(filename):
+     # Load the data
+     loader = UnstructuredFileLoader(filename)
+     raw_documents = loader.load()
+
+     # Split the text into chunks
+     text_splitter = CharacterTextSplitter(
+         separator="\n\n", chunk_size=800, chunk_overlap=0, length_function=len
+     )
+
+     documents = text_splitter.split_documents(raw_documents)
+
+     # Create the vector store
+     vectorstore = FAISS.from_documents(
+         documents, embeddings, distance_strategy=DistanceStrategy.DOT_PRODUCT
+     )
+
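+     # With k=2 and 800-character chunks, at most two short passages are
+     # injected into the prompt, keeping it well inside Phi-2's 2048-token window.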
+     return VectorStoreRetriever(vectorstore=vectorstore, search_kwargs={"k": 2})
+
+
+ # Retrieval QA chain
+ def get_retrieval_qa_chain(text_file, hf_model):
+     retriever = default_retriever
+     if text_file != default_text_file:
+         retriever = prepare_vector_store_retriever(text_file)
+
+     chain = RetrievalQA.from_chain_type(
+         llm=hf_model,
+         retriever=retriever,
+         chain_type_kwargs={"prompt": QA_PROMPT},
+     )
+     return chain
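+ # The retriever for the default file is built once at startup (default_retriever,
+ # defined below) and reused, so the default document is never re-embedded.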
+
+
+ # Generates a response using the question-answering chain defined earlier
+ def generate(question, answer, text_file, max_new_tokens):
+     # answer holds the output box's current contents and is not used here
+     streamer = TextIteratorStreamer(
+         tokenizer=tokenizer, skip_prompt=True, skip_special_tokens=True, timeout=300.0
+     )
+     phi2_pipeline = pipeline(
+         "text-generation",
+         tokenizer=tokenizer,
+         model=model,
+         max_new_tokens=max_new_tokens,
+         pad_token_id=tokenizer.eos_token_id,
+         eos_token_id=tokenizer.eos_token_id,
+         device_map="auto",
+         streamer=streamer,
+     )
+
+     hf_model = HuggingFacePipeline(pipeline=phi2_pipeline)
+     qa_chain = get_retrieval_qa_chain(text_file, hf_model)
+
+     query = f"{question}"
+
+     # Guard against questions that would overflow the context window
+     if len(tokenizer.tokenize(query)) >= 512:
+         query = "Repeat 'Your question is too long!'"
+
+     # Run the chain in a background thread so the streamer can be consumed below
+     thread = Thread(target=qa_chain.invoke, kwargs={"input": {"query": query}})
+     thread.start()
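+     # Yield the partial response as tokens arrive so the Gradio textbox
+     # updates incrementally instead of waiting for the full answer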
+
+     response = ""
+     for token in streamer:
+         response += token
+         yield response.strip()
+
+
+ # Updates the displayed file name and the text_file state when a new file is
+ # uploaded; the retriever is rebuilt from it on the next query
+ def upload_file(file):
+     return file, file
+
+
+ with gr.Blocks() as demo:
+     gr.Markdown(
+         """
+         # Retrieval Augmented Generation with Phi-2: Question Answering demo
+         ### This demo uses the Phi-2 language model and Retrieval Augmented Generation (RAG). It allows you to upload a txt file and ask the model questions about the content of that file.
+         ### If you don't have one, a txt file is already loaded: the new Oppenheimer movie's entire Wikipedia page. The movie came out recently, in July 2023, so the Phi-2 model is not aware of it.
+         The context size of the Phi-2 model is 2048 tokens, so even this medium-sized Wikipedia page (11.5k tokens) does not fit in the context window.
+         Retrieval Augmented Generation (RAG) lets us retrieve just the few small chunks of the document that are relevant to our query and inject them into the prompt.
+         The model can then answer questions by incorporating knowledge from the newly provided document. RAG can be used with thousands of documents, but this demo is limited to just one txt file.
+         """
+     )
+
+     default_text_file = "Oppenheimer-movie-wiki.txt"
+     default_retriever = prepare_vector_store_retriever(default_text_file)
+
+     text_file = gr.State(default_text_file)
+
+     gr.Markdown(
+         "## Upload a txt file or use the default 'Oppenheimer-movie-wiki.txt' that has already been loaded"
+     )
+
+     file_name = gr.Textbox(
+         label="Loaded text file", value=default_text_file, lines=1, interactive=False
+     )
+     upload_button = gr.UploadButton(
+         label="Click to upload a text file", file_types=["text"], file_count="single"
+     )
+     upload_button.upload(upload_file, upload_button, [file_name, text_file])
+
+     gr.Markdown("## Enter your question")
+     tokens_slider = gr.Slider(
+         8,
+         256,
+         value=64,
+         label="Maximum new tokens",
+         info="A larger `max_new_tokens` value gives longer text responses, at the cost of a slower response time.",
+     )
+
+     with gr.Row():
+         with gr.Column():
+             ques = gr.Textbox(label="Question", placeholder="Enter text here", lines=3)
+         with gr.Column():
+             ans = gr.Textbox(label="Answer", lines=4, interactive=False)
+     with gr.Row():
+         with gr.Column():
+             btn = gr.Button("Submit")
+         with gr.Column():
+             clear = gr.ClearButton([ques, ans])
+
+     btn.click(fn=generate, inputs=[ques, ans, text_file, tokens_slider], outputs=[ans])
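+     # generate is a generator, so each yielded string streams into ans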
+     examples = gr.Examples(
+         examples=[
+             "Who portrayed J. Robert Oppenheimer in the new Oppenheimer movie?",
+             "In the plot of the movie, why did Lewis Strauss resent Robert Oppenheimer?",
+             "How much money did the Oppenheimer movie make at the US and global box office?",
+             "What score did the Oppenheimer movie get on Rotten Tomatoes and Metacritic?",
+         ],
+         inputs=[ques],
+     )
+
+ demo.queue().launch()
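+ # queue() enables Gradio's request queue, which is needed for streaming
+ # generator outputs like generate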