Skip to content

Commit

Permalink
Add user cooldown + some bugfixes
Browse files Browse the repository at this point in the history
  • Loading branch information
F33RNI committed Jul 23, 2023
1 parent aa741ad commit 86e2e70
Show file tree
Hide file tree
Showing 5 changed files with 191 additions and 57 deletions.
161 changes: 131 additions & 30 deletions QueueHandler.py
Original file line number Diff line number Diff line change
Expand Up @@ -187,6 +187,54 @@ def queue_to_list(request_response_queue: multiprocessing.Queue) -> list:
return queue_list


def _user_module_cooldown(config: dict,
messages: List[Dict],
request: RequestResponseContainer,
time_left_seconds: int) -> None:
"""
Sends cooldown message to the user
:param config:
:param messages:
:param request:
:param time_left_seconds:
:return:
"""
# Get user language
lang = UsersHandler.get_key_or_none(request.user, "lang", 0)

# Calculate time left
if time_left_seconds < 0:
time_left_seconds = 0
time_left_hours = time_left_seconds // 3600
time_left_minutes = (time_left_seconds - (time_left_hours * 3600)) // 60
time_left_seconds = time_left_seconds - (time_left_hours * 3600) - (time_left_minutes * 60)

# Convert to string (ex. 1h 20m 9s)
time_left_str = ""
if time_left_hours > 0:
if len(time_left_str) > 0:
time_left_str += " "
time_left_str += str(time_left_hours) + messages[lang]["hours"]
if time_left_minutes > 0:
if len(time_left_str) > 0:
time_left_str += " "
time_left_str += str(time_left_minutes) + messages[lang]["minutes"]
if time_left_seconds > 0:
if len(time_left_str) > 0:
time_left_str += " "
time_left_str += str(time_left_seconds) + messages[lang]["seconds"]
if time_left_str == "":
time_left_str = "0" + messages[lang]["seconds"]

# Generate cooldown message
request.response = messages[lang]["user_cooldown_error"].replace("\\n", "\n") \
.format(time_left_str,
messages[lang]["modules"][request.request_type])

# Send this message
BotHandler.async_helper(BotHandler.send_message_async(config, messages, request, end=True))


def _request_processor(config: dict,
messages: List[Dict],
logging_queue: multiprocessing.Queue,
Expand Down Expand Up @@ -231,46 +279,97 @@ def _request_processor(config: dict,

# ChatGPT
if request_.request_type == RequestResponseContainer.REQUEST_TYPE_CHATGPT:
proxy_ = None
if proxy and config["chatgpt"]["proxy"] == "auto":
proxy_ = proxy
chatgpt_module.initialize(proxy_)
chatgpt_module.process_request(request_)
chatgpt_module.exit()
chatgpt_user_last_request_timestamp = UsersHandler.get_key_or_none(request_.user, "timestamp_chatgpt", 0)
time_passed_seconds = int(time.time()) - chatgpt_user_last_request_timestamp
if time_passed_seconds < config["chatgpt"]["user_cooldown_seconds"]:
request_.error = True
logging.warning("User {0} sends ChatGPT requests too quickly!".format(request_.user["user_id"]))
_user_module_cooldown(config, messages, request_,
config["chatgpt"]["user_cooldown_seconds"] - time_passed_seconds)
else:
request_.user["timestamp_chatgpt"] = int(time.time())
users_handler.save_user(request_.user)
proxy_ = None
if proxy and config["chatgpt"]["proxy"] == "auto":
proxy_ = proxy
chatgpt_module.initialize(proxy_)
chatgpt_module.process_request(request_)
chatgpt_module.exit()

# DALL-E
elif request_.request_type == RequestResponseContainer.REQUEST_TYPE_DALLE:
proxy_ = None
if proxy and config["dalle"]["proxy"] == "auto":
proxy_ = proxy
dalle_module.initialize(proxy_)
dalle_module.process_request(request_)
dalle_user_last_request_timestamp = UsersHandler.get_key_or_none(request_.user, "timestamp_dalle", 0)
time_passed_seconds = int(time.time()) - dalle_user_last_request_timestamp
if time_passed_seconds < config["dalle"]["user_cooldown_seconds"]:
request_.error = True
logging.warning("User {0} sends DALL-E requests too quickly!".format(request_.user["user_id"]))
_user_module_cooldown(config, messages, request_,
config["dalle"]["user_cooldown_seconds"] - time_passed_seconds)
else:
request_.user["timestamp_dalle"] = int(time.time())
users_handler.save_user(request_.user)
proxy_ = None
if proxy and config["dalle"]["proxy"] == "auto":
proxy_ = proxy
dalle_module.initialize(proxy_)
dalle_module.process_request(request_)

# EdgeGPT
elif request_.request_type == RequestResponseContainer.REQUEST_TYPE_EDGEGPT:
proxy_ = None
if proxy and config["chatgpt"]["proxy"] == "auto":
proxy_ = proxy
edgegpt_module.initialize(proxy_)
edgegpt_module.process_request(request_)
edgegpt_module.exit()
edgegpt_user_last_request_timestamp = UsersHandler.get_key_or_none(request_.user, "timestamp_edgegpt", 0)
time_passed_seconds = int(time.time()) - edgegpt_user_last_request_timestamp
if time_passed_seconds < config["edgegpt"]["user_cooldown_seconds"]:
request_.error = True
logging.warning("User {0} sends EdgeGPT requests too quickly!".format(request_.user["user_id"]))
_user_module_cooldown(config, messages, request_,
config["edgegpt"]["user_cooldown_seconds"] - time_passed_seconds)
else:
request_.user["timestamp_edgegpt"] = int(time.time())
users_handler.save_user(request_.user)
proxy_ = None
if proxy and config["edgegpt"]["proxy"] == "auto":
proxy_ = proxy
edgegpt_module.initialize(proxy_)
edgegpt_module.process_request(request_)
edgegpt_module.exit()

# Bard
elif request_.request_type == RequestResponseContainer.REQUEST_TYPE_BARD:
proxy_ = None
if proxy and config["bard"]["proxy"] == "auto":
proxy_ = proxy
bard_module.initialize(proxy_)
bard_module.process_request(request_)
bard_module.exit()
bard_user_last_request_timestamp = UsersHandler.get_key_or_none(request_.user, "timestamp_bard", 0)
time_passed_seconds = int(time.time()) - bard_user_last_request_timestamp
if time_passed_seconds < config["bard"]["user_cooldown_seconds"]:
request_.error = True
logging.warning("User {0} sends Bard requests too quickly!".format(request_.user["user_id"]))
_user_module_cooldown(config, messages, request_,
config["bard"]["user_cooldown_seconds"] - time_passed_seconds)
else:
request_.user["timestamp_bard"] = int(time.time())
users_handler.save_user(request_.user)
proxy_ = None
if proxy and config["bard"]["proxy"] == "auto":
proxy_ = proxy
bard_module.initialize(proxy_)
bard_module.process_request(request_)
bard_module.exit()

# Bing ImageGen
elif request_.request_type == RequestResponseContainer.REQUEST_TYPE_BING_IMAGEGEN:
proxy_ = None
if proxy and config["bing_imagegen"]["proxy"] == "auto":
proxy_ = proxy
bing_image_gen_module.initialize(proxy_)
bing_image_gen_module.process_request(request_)
bing_imagegen_user_last_request_timestamp \
= UsersHandler.get_key_or_none(request_.user, "timestamp_bing_imagegen", 0)
time_passed_seconds = int(time.time()) - bing_imagegen_user_last_request_timestamp
if time_passed_seconds < config["bing_imagegen"]["user_cooldown_seconds"]:
request_.error = True
logging.warning("User {0} sends BingImageGen requests too quickly!".format(request_.user["user_id"]))
_user_module_cooldown(config, messages, request_,
config["bing_imagegen"]["user_cooldown_seconds"] - time_passed_seconds)
else:
request_.user["timestamp_bing_imagegen"] = int(time.time())
users_handler.save_user(request_.user)
proxy_ = None
if proxy and config["bing_imagegen"]["proxy"] == "auto":
proxy_ = proxy
bing_image_gen_module.initialize(proxy_)
bing_image_gen_module.process_request(request_)

# Wrong API type
else:
Expand Down Expand Up @@ -576,8 +675,10 @@ def _collect_data(self, request_response: RequestResponseContainer, log_request=

# Log response
else:
# DALL-E response
if request_response.request_type == RequestResponseContainer.REQUEST_TYPE_DALLE:
# DALL-E or BingImageGen response without error
if (request_response.request_type == RequestResponseContainer.REQUEST_TYPE_DALLE
or request_response.request_type == RequestResponseContainer.REQUEST_TYPE_BING_IMAGEGEN) \
and not request_response.error:
response = base64.b64encode(requests.get(request_response.response, timeout=120).content) \
.decode("utf-8")

Expand Down
48 changes: 27 additions & 21 deletions UsersHandler.py
Original file line number Diff line number Diff line change
Expand Up @@ -16,6 +16,7 @@
"""

import logging
import multiprocessing
from typing import List, Dict

import JSONReaderWriter
Expand Down Expand Up @@ -48,15 +49,18 @@ def __init__(self, config: dict, messages: List[Dict]):
self.config = config
self.messages = messages

self.lock = multiprocessing.Lock()

def read_users(self) -> list:
    """
    Reads users data from the database file
    :return: users as list of dictionaries or [] if not found
    """
    # Hold the lock so a concurrent save_user() cannot write the file mid-read
    # (the scraped diff showed both the old unlocked and new locked bodies;
    # this is the intended post-commit, lock-protected version)
    with self.lock:
        users = JSONReaderWriter.load_json(self.config["files"]["users_database"])
    # NOTE(review): load_json presumably returns None when the file is missing
    # or unparseable — confirm against JSONReaderWriter
    return [] if users is None else users

def get_user_by_id(self, user_id: int) -> dict:
"""
Expand All @@ -82,24 +86,26 @@ def save_user(self, user_data: dict) -> None:
return

users = self.read_users()
user_index = -1
for i in range(len(users)):
if users[i]["user_id"] == user_data["user_id"]:
user_index = i
break

# User exists
if user_index >= 0:
new_keys = user_data.keys()
for new_key in new_keys:
users[user_index][new_key] = user_data[new_key]

# New user
else:
users.append(user_data)

# Save to database
JSONReaderWriter.save_json(self.config["files"]["users_database"], users)
with self.lock:
user_index = -1
for i in range(len(users)):
if users[i]["user_id"] == user_data["user_id"]:
user_index = i
break

# User exists
if user_index >= 0:
new_keys = user_data.keys()
for new_key in new_keys:
users[user_index][new_key] = user_data[new_key]

# New user
else:
users.append(user_data)

# Save to database
JSONReaderWriter.save_json(self.config["files"]["users_database"], users)

def _create_user(self, user_id: int) -> dict:
"""
Expand Down
25 changes: 20 additions & 5 deletions config.json
Original file line number Diff line number Diff line change
Expand Up @@ -48,7 +48,10 @@
"cooldown_seconds": 20,

"__comment14__": "RESPONSE TIMEOUT (IN SECONDS)",
"timeout_seconds": 120
"timeout_seconds": 120,

"__comment15__": "HOW OFTEN EACH USER CAN SEND REQUESTS TO THIS MODULE (SPECIFY 0 TO REMOVE THE RESTRICTION)",
"user_cooldown_seconds": 0
},

"__comment03__": "EDGEGPT SETTINGS",
Expand All @@ -67,7 +70,10 @@
"proxy": "",

"__comment06__": "RESPONSE TIMEOUT (IN SECONDS)",
"timeout_seconds": 240
"timeout_seconds": 240,

"__comment07__": "HOW OFTEN EACH USER CAN SEND REQUESTS TO THIS MODULE (SPECIFY 0 TO REMOVE THE RESTRICTION)",
"user_cooldown_seconds": 0
},

"__comment04__": "DALL-E SETTINGS",
Expand All @@ -83,7 +89,10 @@
"proxy": "",

"__comment05__": "RESPONSE TIMEOUT (IN SECONDS)",
"timeout_seconds": 60
"timeout_seconds": 120,

"__comment15__": "HOW OFTEN EACH USER CAN SEND REQUESTS TO THIS MODULE (SPECIFY 0 TO REMOVE THE RESTRICTION)",
"user_cooldown_seconds": 600
},

"__comment05__": "BING IMAGEGEN SETTINGS (CURRENTLY NOT WORKING)",
Expand All @@ -96,7 +105,10 @@
"proxy": "",

"__comment06__": "RESPONSE TIMEOUT (IN SECONDS)",
"timeout_seconds": 120
"timeout_seconds": 120,

"__comment15__": "HOW OFTEN EACH USER CAN SEND REQUESTS TO THIS MODULE (SPECIFY 0 TO REMOVE THE RESTRICTION)",
"user_cooldown_seconds": 0
},

"__comment06__": "BARD SETTINGS",
Expand All @@ -112,7 +124,10 @@
"proxy": "",

"__comment05__": "RESPONSE TIMEOUT (IN SECONDS)",
"timeout_seconds": 120
"timeout_seconds": 120,

"__comment15__": "HOW OFTEN EACH USER CAN SEND REQUESTS TO THIS MODULE (SPECIFY 0 TO REMOVE THE RESTRICTION)",
"user_cooldown_seconds": 0
},

"__comment07__": "TELEGRAM SETTINGS",
Expand Down
2 changes: 1 addition & 1 deletion main.py
Original file line number Diff line number Diff line change
Expand Up @@ -34,7 +34,7 @@
from JSONReaderWriter import load_json

# GPT-Telegramus version
__version__ = "3.1.0"
__version__ = "3.2.0"

# Logging level
LOGGING_LEVEL = logging.INFO
Expand Down
12 changes: 12 additions & 0 deletions messages.json
Original file line number Diff line number Diff line change
Expand Up @@ -27,6 +27,10 @@
"clear_error": "❌ Error clearing chat history!\\n\\n{0}",
"clear_select_module": "Select the module whose chat history you want to clear,\\nor ignore this message",
"module_select_module": "Your current module: {}\\nSelect the module to chat with,\\nor ignore this message",
"user_cooldown_error": "❌ Please wait {0} before sending request to the {1} module!",
"hours": "h",
"minutes": "m",
"seconds": "s",
"ban_message_admin": "✅ Banned user: {0}\\n\\nReason: {1}",
"ban_no_user_id": "❌ Error! Please specify user_id",
"ban_message_user": "❌ You're not whitelisted or you have been banned! \\n\\nReason: {0}",
Expand Down Expand Up @@ -81,6 +85,10 @@
"clear_error": "❌ Ошибка очистки истории чата!\\n\\n{0}",
"clear_select_module": "Выберите модуль, историю чата которого хотите очистить\\nИли проигнорируйте это сообщение",
"module_select_module": "Текущий модуль: {}\\nВыберите модуль для чата\\nИли проигнорируйте это сообщение",
"user_cooldown_error": "❌ Пожалуйста подождите {0}, прежде чем отправлять запрос к {1}",
"hours": "ч",
"minutes": "м",
"seconds": "с",
"ban_message_admin": "✅ Заблокирован пользователь: {0}\\n\\nПричина: {1}",
"ban_no_user_id": "❌ Ошибка! Пожалуйста, укажите user_id",
"ban_message_user": "❌ Вы не находитесь в белом списке или вы были забанены! \\n\\nПричина: {0}",
Expand Down Expand Up @@ -135,6 +143,10 @@
"clear_error": "❌ Ошибка очистбки истории чата!\\n\\n{0}",
"clear_select_module": "Выберимте модуль, историю чатба которогоб хотитеб очиститб\\nИли проигнорируйте енто сообщенне",
"module_select_module": "Текущий модуль: {}\\nВыберите модуль для чата\\nИли проигнорируйте это сообщенне",
"user_cooldown_error": "❌ Пожамлусто пождите есчо {0}, прежде чем черыкатб сообщчэние к {1}",
"hours": "ч",
"minutes": "м",
"seconds": "с",
"ban_message_admin": "✅ Заблокирован юзер: {0}\\n\\nПримчина: {1}",
"ban_no_user_id": "❌ Ошибка! Пожалеста, укажите user_id",
"ban_message_user": "❌ Вы не находитясб в белом списке или вы были забананены! \\n\\nПричина: {0}",
Expand Down

0 comments on commit 86e2e70

Please sign in to comment.