I am trying to fetch data from a CSV file via the OpenAI API, but I am getting a "token exceeded" error. I have tried both GPT-3.5 Turbo and all versions of the GPT-4 model to check on that.
Is there any change I need to make to the prompt, or should I use some other method to reduce the token count? I have also set the maximum tokens to 2500. I am using Swagger to test my endpoint request.
[Route("AskQuestionCsv")]
public async Task<IActionResult> AskQuestionCsv([FromBody] string question)
{
if (string.IsNullOrWhiteSpace(extractedCsvText))
{
return BadRequest(new { Message = "No CSV content available. Please upload a CSV file first." });
}
if (string.IsNullOrWhiteSpace(question))
{
return BadRequest(new { Message = "Question cannot be empty." });
}
try
{
var openai = new OpenAIAPI("API_KEY");
var chatRequest = new ChatRequest
{
Model = "gpt-4",
Temperature = 0.7,
MaxTokens = 25000,
Messages = new List<ChatMessage>
{
new ChatMessage
{
Role = ChatMessageRole.System,
Content = "You are a helpful assistant."
},
new ChatMessage
{
Role = ChatMessageRole.User,
Content = $"Based on the following text from the CSV file, answer the question.\n\nCSV Text:\n{extractedCsvText}\n\nQuestion: {question}"
}
}
};
var chatResponse = await openai.Chat.CreateChatCompletionAsync(chatRequest);
var answer = chatResponse.Choices.FirstOrDefault()?.Message.Content.Trim();
return Ok(new { Question = question, Answer = answer });
}
catch (Exception ex)
{
return StatusCode(500, new { Message = "ERROR: " + ex.Message }
}
Error:
Error at chat/completions (https://api.openai.com/v1/chat/completions) with HTTP status code: TooManyRequests. Content:
{
  "error": {
    "message": "Request too large for gpt-4 in organization org-uedxqeR1FzNcdHx3MOuawI9d on tokens per min (TPM): Limit 10000, Requested 6668686. The input or output tokens must be reduced in order to run successfully. Visit https://platform.openai.com/account/rate-limits to learn more.",
    "type": "tokens",
    "param": null,
    "code": "rate_limit_exceeded"
  }
}
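For context, the error shows the request was roughly 6.6 million tokens, far beyond both the model's context window and the organization's 10,000 tokens-per-minute limit, so the full CSV text cannot go into a single message. Below is a minimal sketch of how the CSV text could be cut down to a rough token budget before building the prompt; the 4-characters-per-token estimate and the TruncateToTokenBudget helper are assumptions for illustration, not part of the original code or the OpenAI library.

// Hypothetical helper: crude truncation assuming ~4 characters per token.
// A real implementation could use a tokenizer library for an exact count.
private static string TruncateToTokenBudget(string text, int maxTokens)
{
    const int charsPerToken = 4;                 // rough heuristic, not exact
    int maxChars = maxTokens * charsPerToken;

    return text.Length <= maxChars
        ? text
        : text.Substring(0, maxChars);           // keep only the first part of the CSV
}

// Possible usage before building the ChatRequest:
// var csvForPrompt = TruncateToTokenBudget(extractedCsvText, 6000);
// Content = $"CSV Text:\n{csvForPrompt}\n\nQuestion: {question}"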