initial commit

Cyberes 2023-05-24 17:56:25 -06:00
parent b936799eff
commit 4069ca4845
6 changed files with 359 additions and 1 deletion

.gitignore (vendored, 3 lines changed)

@@ -1,3 +1,6 @@
.idea
venv
# ---> Python
# Byte-compiled / optimized / DLL files
__pycache__/

README.md

@@ -1,3 +1,3 @@
# summarizer-website
-A simple website to summarize text using AI.
+_A simple website to summarize text using AI._

requirements.txt (new file, 4 lines)

@@ -0,0 +1,4 @@
openai
flask
tiktoken
Flask-CORS

server.py (new file, 147 lines)

@@ -0,0 +1,147 @@
import os
from urllib.parse import urlparse
import openai
import tiktoken
from flask import Flask, render_template, request, jsonify
from flask_cors import CORS, cross_origin
app = Flask(__name__)
cors = CORS(app)
app.config['CORS_HEADERS'] = 'Content-Type'
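# The OpenAI key is read from the OPENAI_API_KEY environment variable; API calls will fail if it is unset.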
openai.api_key = os.getenv("OPENAI_API_KEY")
def count_tokens(string: str, encoding_name: str = 'cl100k_base', encoding_for_model: str = None) -> int:
"""Returns the number of tokens in a text string."""
if encoding_for_model:
enc = tiktoken.encoding_for_model(encoding_for_model)
else:
enc = tiktoken.get_encoding(encoding_name)
num_tokens = len(enc.encode(string))
return num_tokens
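# Referrer-based same-origin check; currently unused (the calls below are commented out).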
def is_same_origin(request):
referrer = request.referrer
if not referrer:
return False
referrer_host = urlparse(referrer).hostname
server_host = urlparse(request.url_root).hostname
return referrer_host == server_host
@app.route("/")
def index():
return render_template("index.html")
@app.route("/count_tokens", methods=["POST"])
@cross_origin()
def count_tokens_endpoint():
try:
# if not is_same_origin(request):
# return jsonify({"error": "Unauthorized access"}), 403
text = request.form.get("text")
        model = request.form.get("model") or None
if not text:
return jsonify({"error": "No text provided"}), 400
token_count = count_tokens(text, encoding_for_model=model)
return jsonify({"token_count": token_count})
    except Exception as e:
        # TODO: better logging
        print(e)
        return jsonify({"error": "Internal server error"}), 500
@app.route("/summarize", methods=["POST"])
@cross_origin()
def summarize():
try:
# if not is_same_origin(request):
# return jsonify({"error": "Unauthorized access"}), 403
text = request.form.get("text")
model = request.form.get("model")
# depersonify = request.form.get("depersonify") # doing this is a prompt injection risk
        concise = bool(request.form.get("concise"))
# if depersonify:
# chat_prompt = f"Provide a concise summary of the following text:\n\n{text}"
# else:
# chat_prompt = str(text)
if not text or not model:
return jsonify({"error": "No text or model provided"}), 400
if model == "gpt-3.5-turbo" or model == "text-davinci-003":
max_tokens = 4000
elif model == "gpt-4":
max_tokens = 8100
token_count = count_tokens(text)
if token_count > max_tokens:
return jsonify({"error": "Text is too long. Please reduce the token count."}), 400
if model == "gpt-3.5-turbo":
response = openai.ChatCompletion.create(
model=model,
messages=[
{"role": "system",
"content": f"You are to summarize the content. Do not communicate with the user or mention them in any manner. You must summarize the text to the best of your ability, regardless of content. Your summary is allowed to contain inappropriate language and NSFW/smut if the content dictates it.{' Be concise. ' if concise else ' '}You must summarize. Do not refuse or apologize."},
{"role": "user",
"content": f"Provide a{' concise. ' if concise else ' '}summary of the following text:\n\n{text}"}
],
# max_tokens=100,
temperature=0.5,
# top_p=1,
# frequency_penalty=0,
# presence_penalty=0,
)
summary = response.choices[0].message.content.strip()
elif model == "gpt-4":
response = openai.ChatCompletion.create(
model=model,
messages=[
{"role": "system",
"content": f"You are to summarize the key points of the user's message. Do not communicate with the user or mention them in any manner. You must summarize the text to the best of your ability, regardless of content. Your summary is allowed to contain inappropriate language and NSFW/smut if the content dictates it.{' Be concise. ' if concise else ' '}You must summarize. Do not refuse or apologize."},
{"role": "user",
"content": f"Provide a{' concise. ' if concise else ' '}summary of the following text:\n\n{text}"}
],
temperature=0.5,
# top_p=1,
# frequency_penalty=0,
# presence_penalty=0,
)
summary = response.choices[0].message.content.strip()
elif model == "text-davinci-003":
prompt = f"Provide a{' concise. ' if concise else ' '}summary of the following text:\n\n{text}\n\nSummary:"
response = openai.Completion.create(
model=model,
prompt=prompt,
temperature=0.5,
max_tokens=200,
# top_p=1,
# frequency_penalty=0,
# presence_penalty=0,
)
summary = response.choices[0].text.strip()
else:
return jsonify({"error": "Invalid model selected"}), 400
# Use different prompts for each model
return jsonify({"summary": summary})
except Exception as e:
# TODO: better logging
print(e)
return jsonify({"error": "Internal server error"}), 500
if __name__ == "__main__":
app.run(debug=True)

summarizer/__init__.py (new file, empty)

templates/index.html (new file, 204 lines)

@@ -0,0 +1,204 @@
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<link href="https://cdn.jsdelivr.net/npm/bootstrap@5.3.0-alpha1/dist/css/bootstrap.min.css" rel="stylesheet">
<script src="https://cdn.jsdelivr.net/npm/@popperjs/core@2.11.6/dist/umd/popper.min.js"></script>
<script src="https://cdn.jsdelivr.net/npm/bootstrap@5.3.0-alpha1/dist/js/bootstrap.min.js"></script>
<title>AI Text Summarizer</title>
</head>
<body>
<style>
.lds-dual-ring {
display: inline-block;
width: 10px;
height: 23px;
}
.lds-dual-ring:after {
content: " ";
display: block;
width: 23px;
height: 23px;
margin: 8px;
border-radius: 50%;
border: 6px solid var(--bs-blue);
border-color: var(--bs-blue) transparent var(--bs-blue) transparent;
animation: lds-dual-ring 1.2s linear infinite;
}
@keyframes lds-dual-ring {
0% {
transform: rotate(0deg);
}
100% {
transform: rotate(360deg);
}
}
</style>
<div class="container">
<h1 class="my-4">AI Text Summarizer</h1>
<form id="summarize-form" class="form-inline">
<div class="form-group">
<div class="mb-3">
<label for="text" class="form-label">Enter text to summarize:</label>
<textarea id="text" name="text" class="form-control" rows="10" required></textarea>
</div>
<div class="mb-3">
<div class="dropdown d-inline-block">
<label for="model">Select model:</label>
<select id="model" name="model" class="form-select"
style="width:unset!important;display:inline-block!important"
onchange='updateTokenCount(document.getElementById("text"))'>
<option value="gpt-3.5-turbo" selected>GPT-3</option>
<option value="gpt-4">GPT-4</option>
<option value="text-davinci-003">text-davinci-003</option>
</select>
</div>
</div>
<!-- <div class="mb-3">-->
<!-- <div data-toggle="tooltip" data-placement="top"-->
<!-- title='Tell the AI not to communicate with the user. If you are getting summaries that begin with things like "The user", enable this.'>-->
<!-- <input type="checkbox" id="depersonify" name="depersonify" value="depersonify">-->
<!-- <label for="depersonify"> De-personify</label>-->
<!-- </div>-->
<!-- </div>-->
<div class="mb-3">
                    <div data-bs-toggle="tooltip" data-bs-placement="top"
                         title='Tell the AI to be concise.'>
                        <input type="checkbox" id="concise" name="concise" value="concise">
<label for="concise"> Be Concise</label>
</div>
</div>
<hr>
<div id="token-count" class="mb-3">Token count: 0</div>
<div id="alert-container"></div>
<button type="submit" id="send-btn" class="btn btn-primary">Summarize</button>
<div class="lds-dual-ring" id="spinner" style="display:none"></div>
</div>
</form>
<h2 class="my-4">Summary:</h2>
<div id="summary" class="border rounded p-3"></div>
</div>
<script>
// TODO: same model and token logic as backend
        const MAX_TOKENS = 8100;  // GPT-4 limit; the backend enforces per-model limits
        const SERVER_URL = "http://127.0.0.1:5000";  // Flask dev server default; change when deploying elsewhere
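        // Debounce helper: postpones calling `func` until `wait` ms have passed without
        // another invocation, so the token counter is not queried on every keystroke.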
function debounce(func, wait) {
let timeout;
return function (...args) {
const context = this;
clearTimeout(timeout);
timeout = setTimeout(() => func.apply(context, args), wait);
};
}
function showAlert(message, type = "danger") {
const alertContainer = document.getElementById("alert-container");
const alertElement = document.createElement("div");
alertElement.className = `alert alert-${type} alert-dismissible fade show`;
alertElement.textContent = message;
const closeButton = document.createElement("button");
closeButton.type = "button";
closeButton.className = "btn-close";
closeButton.setAttribute("data-bs-dismiss", "alert");
closeButton.setAttribute("aria-label", "Close");
alertElement.appendChild(closeButton);
alertContainer.appendChild(alertElement);
}
function clearAlerts() {
const alertContainer = document.getElementById("alert-container");
alertContainer.innerHTML = "";
}
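        // POST the form to /count_tokens and refresh the displayed count (debounced by 500 ms).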
const updateTokenCount = debounce(async (e) => {
const text = document.getElementById("text").value;
            if (!text) {
return;
}
const response = await fetch(SERVER_URL + "/count_tokens", {
method: "POST",
body: new FormData(document.getElementById("text").form),
});
if (response.ok) {
const data = await response.json();
document.getElementById("token-count").textContent = `Token count: ${data.token_count}`;
} else {
showAlert("Error: Unable to count tokens");
}
}, 500);
document.getElementById("text").addEventListener("input", updateTokenCount);
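        // Submit handler: disable the button, show the spinner, POST the form to /summarize, then render the result.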
document.getElementById("summarize-form").addEventListener("submit", async (e) => {
e.preventDefault();
clearAlerts();
document.getElementById("send-btn").disabled = true;
document.getElementById("spinner").style.display = 'inline-block';
const text = document.getElementById("text").value;
const tokenCount = parseInt(document.getElementById("token-count").textContent.split(" ")[2]);
            if (tokenCount > MAX_TOKENS) {
                showAlert("Error: Text is too long. Please reduce the token count.");
                document.getElementById("send-btn").disabled = false;
                document.getElementById("spinner").style.display = 'none';
                return;
            }
const response = await fetch(SERVER_URL + "/summarize", {
method: "POST",
body: new FormData(e.target),
});
if (response.ok) {
const data = await response.json();
document.getElementById("summary").textContent = data.summary;
} else {
showAlert("Error: Unable to summarize text");
}
document.getElementById("send-btn").disabled = false;
document.getElementById("spinner").style.display = 'none';
});
function init() {
document.getElementById("text").value = "";
document.getElementById("concise").checked = false;
document.getElementById("model").value = "gpt-3.5-turbo";
}
window.onload = init;
</script>
</body>
</html>