
Commit

Use gpt-4-turbo and add rate limiter, remove async processing
Biunovich committed Apr 15, 2024
1 parent 2e0189c commit 95c4585
Showing 1 changed file with 3 additions and 3 deletions.
6 changes: 3 additions & 3 deletions Bot.py
@@ -6,7 +6,7 @@
 from dotenv import load_dotenv
 from telegram import Update
 from telegram.ext import (Application, CommandHandler, ContextTypes,
-                          MessageHandler, filters)
+                          MessageHandler, filters, AIORateLimiter)
 
 logging.basicConfig(
     filename="logs/bot.log",
@@ -48,7 +48,7 @@ async def handle_message(update: Update, context: ContextTypes.DEFAULT_TYPE):
     append_message(messages, "user", message)
     try:
         response = await openai_client.chat.completions.create(
-            model="gpt-4-turbo-preview",
+            model="gpt-4-turbo",
             messages=messages,
             user=user_id
         )
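
The model string moves from the preview alias to `gpt-4-turbo`, which OpenAI had promoted to general availability shortly before this commit. Note that the new AIORateLimiter only throttles outgoing Telegram API calls, not OpenAI calls, so OpenAI-side throttling still needs its own handling. A hedged sketch of how the surrounding call could guard against it; `ask_model` and its arguments are hypothetical helpers, not part of Bot.py:

```python
import openai  # openai>=1.0 async client style, matching the await-based call above


async def ask_model(openai_client, messages, user_id):
    """Hypothetical helper mirroring the call inside handle_message."""
    try:
        response = await openai_client.chat.completions.create(
            model="gpt-4-turbo",  # GA alias; resolves to a dated snapshot
            messages=messages,
            user=user_id,  # lets OpenAI attribute traffic to the end user
        )
        return response.choices[0].message.content
    except openai.RateLimitError:
        # 429s from the OpenAI API are unrelated to the Telegram rate limiter.
        return "The model is busy right now, please try again shortly."
```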
@@ -82,7 +82,7 @@ async def clean(update: Update, context: ContextTypes.DEFAULT_TYPE):
     context.user_data["messages"] = []
     await update.message.reply_text("Message history was cleaned!")
 
-application = Application.builder().token(telegram_token).concurrent_updates(True).build()
+application = Application.builder().token(telegram_token).rate_limiter(AIORateLimiter()).build()
 application.add_handler(
     MessageHandler(filters.TEXT & ~filters.COMMAND & filters.User(user_id=allowed_users, allow_empty=True), handle_message)
 )
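"Remove async processing" in the commit title refers to dropping `.concurrent_updates(True)`: handlers now process one update at a time, while AIORateLimiter separately throttles outgoing Bot API requests. The two builder options are independent, so both behaviours could also be combined. A minimal sketch, assuming `telegram_token` is loaded from the environment as Bot.py's dotenv import suggests:

```python
from telegram.ext import AIORateLimiter, Application

# Sketch only: rate limiting and concurrent update processing are separate
# builder options, so the pre-commit behaviour could be kept alongside the
# limiter rather than traded away.
application = (
    Application.builder()
    .token(telegram_token)
    .concurrent_updates(True)        # optional: handle updates in parallel
    .rate_limiter(AIORateLimiter())  # throttle outgoing Bot API requests
    .build()
)
```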
