diff --git a/bot.py b/bot.py
index a6ce5e1..bf3292e 100644
--- a/bot.py
+++ b/bot.py
@@ -494,66 +494,58 @@ async def on_message(message):
 
     # Check whether the live chat is active
     if channel_id in live_chats and live_chats[channel_id]["active"]:
-        timestamp = int(time.time())  # Unix timestamp
-        message_data = {
-            "timestamp": timestamp,
-            "message_id": message.id,
-            "user_id": message.author.id,
-            "nickname": message.author.display_name,
-            "content": message.content
-        }
+        # Hand the raw message data to the queue worker; history is updated there
+        await live_chat_queue.put((message, message.id, message.author.id, message.author.display_name, message.content))
 
-        # Update the history
-        live_chats[channel_id]["messages"].append(message_data)
-        save_chat_history(channel_id, live_chats[channel_id]["messages"])
-
-        # Add the message to the AI queue
-        await live_chat_queue.put((channel_id, message_data))
     await client.process_commands(message)
 
 
 async def process_live_chat_queue():
     while True:
         try:
-            channel_id, message_data = await live_chat_queue.get()
-
-            # Assemble the context: introduction + chat history
-            introduction = read_file("chat_intro.txt")  # new introduction file
-            chat_history = load_chat_history(channel_id)
-            formatted_history = "\n".join(
-                f"{msg['timestamp']} | {msg['user_id']} | {msg['nickname']}: {msg['content']}"
-                for msg in chat_history
-            )
-
-            # Send the request to the AI
-            ai_response = await send_to_ai(introduction, formatted_history)
-
-            if ai_response.strip() != "::null::":
-                channel = client.get_channel(channel_id)
-                if channel:
-                    await channel.send(f"**AI:** {ai_response}")
-
-            live_chat_queue.task_done()
+            if not live_chat_queue.empty():
+                message, message_id, user_id, nickname, message_content = await live_chat_queue.get()
+
+                # Assemble the system prompt: introduction plus background data
+                live_introduction = read_file("live_introduction.txt")
+                live_background_data = read_file("live_background_data.txt")
+                message_data = live_introduction + " background data:" + live_background_data
+                chat_history = load_chat_history(message.channel.id)
+
+                timestamp = int(time.time())  # Unix timestamp
+
+                # Encode metadata into the user turn so the model sees who wrote what
+                content = f"{timestamp}/{message_id}/{user_id}/{nickname}:{message_content}"
+                chat_history.append({"role": "user", "content": content})
+
+                messages = [
+                    {"role": "system", "content": message_data},
+                    *chat_history
+                ]
+
+                # Run the blocking OpenAI call in the executor so the event loop stays responsive
+                ai_answer = await loop.run_in_executor(executor, lambda: openai_instance.chat.completions.create(
+                    model="model",
+                    messages=messages,
+                    temperature=0.8,
+                    timeout=15,  # limit waiting time for the response
+                ))
+
+                ai_message = ai_answer.choices[0].message.content
+                chat_history.append({"role": "assistant", "content": ai_message})
+                save_chat_history(message.channel.id, chat_history)
+                channel = message.channel
+
+                # "::null::" is the sentinel for "no reply needed"
+                if ai_message.strip() != "::null::":
+                    if channel:
+                        await channel.send(f"**AI:** {ai_message}")
+
+                live_chat_queue.task_done()
+
+        except asyncio.CancelledError:
+            break
         except Exception as e:
             logger.error(f"Error processing live chat queue: {e}")
-
-
-async def send_to_ai(introduction, chat_history):
-    """Sends the chat history with the introduction to the AI."""
-    try:
-        response = openai_instance.chat.completions.create(
-            model="gpt-4",
-            messages=[
-                {"role": "system", "content": introduction},
-                {"role": "user", "content": f"Here is the chat history so far:\n{chat_history}\nReply to the last message."}
-            ],
-            temperature=0.7,
-            max_tokens=150
-        )
-        return response.choices[0].message.content
-    except Exception as e:
-        logger.error(f"AI processing error: {e}")
-        return "::null::"
+        await asyncio.sleep(5)  # poll the queue every five seconds
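
The patch leans on several module-level objects defined elsewhere in bot.py: live_chat_queue, executor, loop, openai_instance, client, logger, and the read_file / load_chat_history / save_chat_history helpers. A minimal sketch of that surrounding setup is below; the command prefix, max_workers, the JSON-on-disk history format, the chat_histories/ directory, and the on_ready startup guard are all assumptions for illustration, not taken from the patch.

import asyncio
import concurrent.futures
import json
import logging
import os

import discord
from discord.ext import commands
from openai import OpenAI

logger = logging.getLogger("bot")

intents = discord.Intents.default()
intents.message_content = True  # needed to read message.content
client = commands.Bot(command_prefix="!", intents=intents)  # prefix is an assumption

live_chat_queue: asyncio.Queue = asyncio.Queue()
executor = concurrent.futures.ThreadPoolExecutor(max_workers=2)
openai_instance = OpenAI()  # reads OPENAI_API_KEY from the environment

loop = None             # bound once the event loop is running (see on_ready)
_worker_started = False

HISTORY_DIR = "chat_histories"  # assumed storage location for per-channel histories


def read_file(path):
    """Assumed helper: return a file's contents, or '' if it is missing."""
    try:
        with open(path, encoding="utf-8") as f:
            return f.read()
    except FileNotFoundError:
        return ""


def load_chat_history(channel_id):
    """Assumed helper: load a channel's history as a list of role/content dicts."""
    try:
        with open(os.path.join(HISTORY_DIR, f"{channel_id}.json"), encoding="utf-8") as f:
            return json.load(f)
    except (FileNotFoundError, json.JSONDecodeError):
        return []


def save_chat_history(channel_id, history):
    """Assumed helper: persist a channel's history back to disk."""
    os.makedirs(HISTORY_DIR, exist_ok=True)
    with open(os.path.join(HISTORY_DIR, f"{channel_id}.json"), "w", encoding="utf-8") as f:
        json.dump(history, f, ensure_ascii=False, indent=2)


@client.event
async def on_ready():
    global loop, _worker_started
    loop = asyncio.get_running_loop()
    if not _worker_started:  # on_ready can fire again after reconnects
        _worker_started = True
        asyncio.create_task(process_live_chat_queue())

Starting the worker from on_ready behind a one-shot guard avoids spawning a duplicate queue consumer each time Discord reconnects.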