Skip to content

Commit

Permalink
Fixed issues and added direct model requests
Browse files Browse the repository at this point in the history
  • Loading branch information
ToGe3688 committed Dec 7, 2024
1 parent d2cfe9f commit bc763c4
Show file tree
Hide file tree
Showing 10 changed files with 1,099 additions and 386 deletions.
266 changes: 236 additions & 30 deletions README.md

Large diffs are not rendered by default.

66 changes: 34 additions & 32 deletions admin/jsonConfig.json
Original file line number Diff line number Diff line change
@@ -1,38 +1,6 @@
{
"type": "tabs",
"items": {
"tab_1": {
"type": "panel",
"label": "Settings",
"items": {
"retry_delay": {
"type": "number",
"label": "Retry Delay",
"min": 15,
"max": 300,
"help": "Delay between tries to fulfill requests",
"default": 60,
"xs": 12,
"sm": 12,
"md": 6,
"lg": 4,
"xl": 4
},
"max_retries": {
"type": "number",
"label": "Maximum Retries",
"min": 0,
"max": 10,
"help": "Max number of tries to fulfill a request",
"default": 3,
"xs": 12,
"sm": 12,
"md": 6,
"lg": 4,
"xl": 4
}
}
},
"tab_2": {
"type": "panel",
"label": "Tools",
Expand Down Expand Up @@ -212,6 +180,40 @@
"max": 10000,
"step": 1,
"default": 2000
},
{
"type": "number",
"attr": "max_retries",
"xs": 12,
"sm": 4,
"md": 4,
"lg": 4,
"xl": 4,
"label": "Max. Retries",
"tooltip": "How many times should we retry if request to model fails",
"filter": false,
"sort": false,
"min": 0,
"max": 15,
"step": 1,
"default": 3
},
{
"type": "number",
"attr": "retry_delay",
"xs": 12,
"sm": 4,
"md": 4,
"lg": 4,
"xl": 4,
"label": "Retry Delay",
"tooltip": "How long to wait between retries",
"filter": false,
"sort": false,
"min": 0,
"max": 300,
"step": 1,
"default": 15
}
]
}
Expand Down
59 changes: 39 additions & 20 deletions io-package.json
Original file line number Diff line number Diff line change
@@ -1,8 +1,11 @@
{
"common": {
"name": "ai-toolbox",
"version": "0.0.1",
"version": "0.0.2",
"news": {
"0.0.2": {
"en": "Fixed issues and added direct model requests"
},
"0.0.1": {
"en": "initial release",
"de": "Erstveröffentlichung",
Expand Down Expand Up @@ -94,38 +97,54 @@
]
},
"native": {
"retry_delay": 30,
"max_retries": 3,
"bots": [
{
"bot_name": "Inspirator",
"bot_name": "simple-chatbot",
"bot_model": "meta-llama/llama-3.2-3b-instruct:free",
"bot_system_prompt": "You are an intelligent AI that only responds with short inspiring quotes based on the user input",
"bot_example_request": "Today is very cold",
"bot_example_response": "The coldest of nights can lead to the warmest of mornings.",
"temperature": 0.6,
"bot_system_prompt": "You are a friendly and conversational chatbot. Respond to user messages in an engaging and cheerful way. Keep your answers brief and focus on maintaining a pleasant tone.",
"bot_example_request": "Hi there! How are you today?",
"bot_example_response": "I'm doing great, thanks for asking! How about you?",
"temperature": 0.8,
"max_tokens": 3000,
"chat_history": 10,
"retry_delay": 30,
"max_retries": 3
},
{
"bot_name": "weather-reporter",
"bot_model": "meta-llama/llama-3.2-3b-instruct:free",
"bot_system_prompt": "You are a weather reporter chatbot. Based on the input data, generate a detailed and engaging weather report. Your response should include a friendly greeting, a brief overview of the current conditions, and a suggestion for the day. Keep your tone cheerful and informative.",
"bot_example_request": "Current datetime 12/7/2024, 12:00:01 PM. Weatherdata: Temperature 7°C, Humidity 87%, Cloudcover 93%, Airpressure 997mbar, Wind 23kmh, Rain 0mm.",
"bot_example_response": "Hello there! 🌤️ Happy December 7th! As we step into the afternoon, the weather is quite cool at a temperature of 7°C. With a humidity level of 87%, it feels a bit damp out there, so don’t forget to grab your favorite cozy sweater if you’re heading outside! Cloud cover is fairly high today, sitting at 93%, which gives the sky a soft, overcast appearance. The air pressure is at 997 mbar, and while there’s a moderate wind blowing at 23 km/h, it might feel a tad brisk, so a light jacket would be a good call. The good news is that we haven’t seen any rain today, so if you’re planning to venture out, it’s a great time for a stroll or perhaps a warm drink at your favorite café! Stay warm and enjoy your day! ☕🌧️",
"temperature": 0.7,
"max_tokens": 2000,
"chat_history": 0
"chat_history": 5,
"retry_delay": 30,
"max_retries": 3
},
{
"bot_name": "Weatherfrog",
"bot_name": "music-recommender",
"bot_model": "meta-llama/llama-3.2-3b-instruct:free",
"bot_system_prompt": "You are an assistant that creates individual and funny weather reports based on the provided data by the used",
"bot_example_request": "Today High 16°C Low: 6°C, 3mm Rain, Cloudcover 30%, 6 Sun hours, Wind 32kmh, Gusts 64kmh",
"bot_example_response": "Good morning, folks. Today's forecast is a mixed bag - kind of like my aunt's cooking. On one hand, the sun's trying to make an appearance with a decent 6 hours of sun-time. On the other hand, it's still going to feel pretty chilly, with highs only reaching 16°C. Don't forget your umbrella, though, as 3mm of rain will try to dampen your day. And, if that's not enough, gusts of up to 64kmh will be whispering (read: yelling) 'you're blowing away' in your ear. Cloud cover's relatively low at 30%, so at least that's a minor victory. So, grab a scarf and let's make the most of it!",
"temperature": 0.6,
"bot_system_prompt": "You are a music assistant. Based on the current weather and time of day, suggest a playlist or genre that matches the mood. Use concise and creative recommendations. You answer only with your suggestion and nothing else.",
"bot_example_request": "Current Time 24th December 2024 17:30. Outside Temperature: 10°C.",
"bot_example_response": "Christmas Music",
"temperature": 0.7,
"max_tokens": 2000,
"chat_history": 0
"chat_history": 7,
"retry_delay": 30,
"max_retries": 3
},
{
"bot_name": "AI-DJ",
"bot_name": "light-setter",
"bot_model": "meta-llama/llama-3.2-3b-instruct:free",
"bot_system_prompt": "You are an assistant that provides music genre,artist or topic recommendations based on user input. You respond only with the music genre, artist or topic recommendation and nothing else.",
"bot_example_request": "Today is the 24th december 2024",
"bot_example_response": "Christmas Songs",
"bot_system_prompt": "You are a smart home assistant. Based on the characteristics of the currently playing music, recommend RGB hex color values for five different lights to create an immersive atmosphere. Respond only with a JSON object containing the RGB hex values for each light.",
"bot_example_request": "Faithless - Insomnia",
"bot_example_response": "{\"light1\": \"#FF4500\",\"light2\": \"#FFA500\",\"light3\": \"#FFFF00\",\"light4\": \"#ADFF2F\",\"light5\": \"#00FF00\"}",
"temperature": 0.6,
"max_tokens": 2000,
"chat_history": 0
"chat_history": 0,
"retry_delay": 30,
"max_retries": 3
}
],
"anth_models": [
Expand Down
59 changes: 32 additions & 27 deletions lib/anthropic-ai-provider.js
Original file line number Diff line number Diff line change
Expand Up @@ -3,54 +3,63 @@
class AnthropicAiProvider {
/**
* @param {object} adapter - The adapter instance
* @param {object} bot - The bot object
*/
constructor(adapter, bot) {
constructor(adapter) {
this.apiToken = adapter.config.anth_api_token;
this.adapter = adapter;
this.bot = bot;
this.adapter.log.debug(`Created new Anthropic AI provider for assistant: ${bot.bot_name}`);
this.adapter.log.debug(`Created new Anthropic AI provider`);
this.requestData = {};
this.responseData = {};
this.error = null;
}

/**
* Sends a request to the Anthropic AI API
* @param {Array} messages - The messages to send to the AI
*/
async request(messages) {
* Sends a request to the Anthropic AI API
* @param {object} requestObject - The request object
* @returns {Promise<object>} - The response from the AI
* @throws {Error} - If the request fails
*/
async request(requestObject) {
let response = {};
try {
response = await this.sendRequest(messages);
response = await this.sendRequest(requestObject.model, requestObject.messages, requestObject.max_tokens, requestObject.system_prompt, requestObject.temperature);
} catch (error) {
response.error = error.message;
}
return response;
}

/**
* Checks if the API token is set
* @returns {boolean} - True if the API token is set
*/
apiTokenCheck() {
if (!this.apiToken) {
return false;
}
return true;
}

/**
* Sends the actual HTTP request to the Anthropic AI API
* @param {Array} messages - The messages to send to the AI
* @returns {Promise<object>} - The response from the AI
*/
async sendRequest(messages) {
async sendRequest(model, messages, max_tokens = 2000, system_prompt = null, temperature = 0.6) {
const url = "https://api.anthropic.com/v1/messages";

if (!messages || messages.length == 0) {
this.adapter.log.debug("No messages provided for request");
throw new Error(`No messages provided for request`);
}

this.adapter.log.debug("Messages array: " + JSON.stringify(messages));

const body = {
model: this.bot.bot_model,
system: this.bot.bot_system_prompt,
model: model,
system: system_prompt,
messages: messages,
max_tokens: this.bot.max_tokens,
temperature: this.bot.temperature
max_tokens: max_tokens,
temperature: temperature
};

this.adapter.log.debug("Request body: " + JSON.stringify(body));
this.adapter.setStateAsync(this.bot.bot_name + ".request.body", { val: JSON.stringify(body), ack: true });
this.requestData = body;

const response = await fetch(url, {
method: "POST",
Expand Down Expand Up @@ -105,7 +114,7 @@ class AnthropicAiProvider {
const data = await response.json();

this.adapter.log.debug(`Data: ` + JSON.stringify(data));
this.adapter.setStateAsync(this.bot.bot_name + ".response.raw", { val: JSON.stringify(data), ack: true });
this.responseData = data;

if (!data) {
this.adapter.log.warn(`No data from API error`);
Expand Down Expand Up @@ -140,16 +149,12 @@ class AnthropicAiProvider {
};
}

if (!data.model) {
data.model = this.bot.bot_model;
}

const responseObj = {
text: data.content[0].text,
raw: data,
model: data.model,
model: model,
tokens_input: data.usage.input_tokens,
tokens_output: data.usage.output_tokens,
error: null
};


Expand Down
Loading

0 comments on commit bc763c4

Please sign in to comment.