This repository has been archived on 2024-10-27. You can view files and clone it, but cannot push or open issues or pull requests.
local-llm-server/other/gradio_chat.py

34 lines
922 B
Python
Raw Normal View History

import warnings
import gradio as gr
import openai
# Silence library warnings (e.g. gradio/openai deprecation notices) for a clean demo console.
warnings.filterwarnings("ignore")
# The local server ignores the API key, but the openai client requires a non-empty value.
openai.api_key = 'null'
# Route all OpenAI API calls to the local LLM server's OpenAI-compatible endpoint.
openai.api_base = 'http://localhost:5000/api/openai/v1'
def stream_response(prompt: str, history: list) -> "Iterator[str]":
    """Stream an assistant reply for a gradio ChatInterface.

    Rebuilds the OpenAI chat ``messages`` list from gradio's history of
    ``(user, assistant)`` pairs, appends the new ``prompt``, and yields the
    progressively accumulated assistant text as streamed chunks arrive.

    Args:
        prompt: The latest user message.
        history: List of ``(user_message, assistant_message)`` pairs from
            previous turns, as supplied by ``gr.ChatInterface``.

    Yields:
        The assistant reply so far (grows with each streamed chunk).
    """
    messages = []
    for user_msg, assistant_msg in history:
        messages.append({'role': 'user', 'content': user_msg})
        messages.append({'role': 'assistant', 'content': assistant_msg})
    messages.append({'role': 'user', 'content': prompt})

    response = openai.ChatCompletion.create(
        model='0',  # model selection is handled server-side; '0' is a placeholder id
        messages=messages,
        temperature=0,
        max_tokens=300,
        stream=True,
    )

    message = ''
    for chunk in response:
        # The final streamed chunk's delta carries only finish_reason and no
        # 'content' key — indexing it directly raised KeyError; .get avoids that.
        message += chunk['choices'][0]['delta'].get('content', '')
        yield message
gr.ChatInterface(stream_response, examples=["hello", "hola", "merhaba"], title="Chatbot Demo", analytics_enabled=False, cache_examples=False, css='#component-0{height:100%!important}').queue().launch()