You signed in with another tab or window. Reload to refresh your session.You signed out in another tab or window. Reload to refresh your session.You switched accounts on another tab or window. Reload to refresh your session.Dismiss alert
ERROR:kag.interface.common.llm_client:Error RetryError[<Future at 0x7fca89c904c0 state=finished raised BadRequestError>] during invocation: Traceback (most recent call last):
File "/openspg_venv/lib/python3.8/site-packages/tenacity/__init__.py", line 478, in __call__
result = fn(*args, **kwargs)
File "/openspg_venv/lib/python3.8/site-packages/kag/common/llm/openai_client.py", line 120, in call_with_json_parse
rsp = self(prompt)
File "/openspg_venv/lib/python3.8/site-packages/kag/common/llm/openai_client.py", line 99, in call
response = self.client.chat.completions.create(
File "/openspg_venv/lib/python3.8/site-packages/openai/_utils/_utils.py", line 279, in wrapper
return func(*args, **kwargs)
File "/openspg_venv/lib/python3.8/site-packages/openai/resources/chat/completions.py", line 859, in create
return self._post(
File "/openspg_venv/lib/python3.8/site-packages/openai/_base_client.py", line 1280, in post
return cast(ResponseT, self.request(cast_to, opts, stream=stream, stream_cls=stream_cls))
File "/openspg_venv/lib/python3.8/site-packages/openai/_base_client.py", line 957, in request
return self._request(
File "/openspg_venv/lib/python3.8/site-packages/openai/_base_client.py", line 1061, in _request
raise self._make_status_error_from_response(err.response) from None
openai.BadRequestError: Error code: 400 - {'error': {'message': "This model's maximum context length is 65536 tokens. However, you requested 507288 tokens (507288 in the messages, 0 in the completion). Please reduce the length of the messages or completion.", 'type': 'invalid_request_error', 'param': None, 'code': 'invalid_request_error'}}
The text was updated successfully, but these errors were encountered:
ERROR:kag.interface.common.llm_client:Error RetryError[<Future at 0x7fca89c904c0 state=finished raised BadRequestError>] during invocation: Traceback (most recent call last):
File "/openspg_venv/lib/python3.8/site-packages/tenacity/__init__.py", line 478, in __call__
result = fn(*args, **kwargs)
File "/openspg_venv/lib/python3.8/site-packages/kag/common/llm/openai_client.py", line 120, in call_with_json_parse
rsp = self(prompt)
File "/openspg_venv/lib/python3.8/site-packages/kag/common/llm/openai_client.py", line 99, in call
response = self.client.chat.completions.create(
File "/openspg_venv/lib/python3.8/site-packages/openai/_utils/_utils.py", line 279, in wrapper
return func(*args, **kwargs)
File "/openspg_venv/lib/python3.8/site-packages/openai/resources/chat/completions.py", line 859, in create
return self._post(
File "/openspg_venv/lib/python3.8/site-packages/openai/_base_client.py", line 1280, in post
return cast(ResponseT, self.request(cast_to, opts, stream=stream, stream_cls=stream_cls))
File "/openspg_venv/lib/python3.8/site-packages/openai/_base_client.py", line 957, in request
return self._request(
File "/openspg_venv/lib/python3.8/site-packages/openai/_base_client.py", line 1061, in _request
raise self._make_status_error_from_response(err.response) from None
openai.BadRequestError: Error code: 400 - {'error': {'message': "This model's maximum context length is 65536 tokens. However, you requested 507288 tokens (507288 in the messages, 0 in the completion). Please reduce the length of the messages or completion.", 'type': 'invalid_request_error', 'param': None, 'code': 'invalid_request_error'}}
The text was updated successfully, but these errors were encountered: