Skip to content

Commit

Permalink
feat: Add support for the updated GPT-3.5 Turbo model (gpt-3.5-turbo-1106) (#143)
Browse files Browse the repository at this point in the history
  • Loading branch information
m1sk9 authored Nov 7, 2023
1 parent 0cc7f5e commit 40c5300
Show file tree
Hide file tree
Showing 2 changed files with 7 additions and 4 deletions.
2 changes: 1 addition & 1 deletion src/event.rs
Original file line number Diff line number Diff line change
Expand Up @@ -78,7 +78,7 @@ async fn process_ichiyoai(ctx: Context, message: Message) -> anyhow::Result<()>
"gpt-4-1106-preview".to_string()
// "gpt-4".to_string()
} else {
"gpt-3.5-turbo".to_string()
"gpt-3.5-turbo-1106".to_string()
};
let mut replies: Vec<ChatCompletionRequestMessage> =
vec![ChatCompletionRequestMessageArgs::default()
Expand Down
9 changes: 6 additions & 3 deletions src/model/chatgpt.rs
Original file line number Diff line number Diff line change
Expand Up @@ -22,16 +22,19 @@ const SCALE: f32 = 10_000_000.0;
const EXCHANGE_RATE: f32 = 150.0;

// https://openai.com/pricing
const GPT3_5_JPY_PER_INPUT_TOKEN: u32 = (0.0015 * EXCHANGE_RATE * SCALE / 1000.0) as u32;
const GPT3_5_JPY_PER_OUTPUT_TOKEN: u32 = (0.002 * EXCHANGE_RATE * SCALE / 1000.0) as u32;
const GPT3_5_TURBO_JPY_PER_INPUT_TOKEN: u32 = (0.0010 * EXCHANGE_RATE * SCALE / 1000.0) as u32;
const GPT3_5_TURBO_JPY_PER_OUTPUT_TOKEN: u32 = (0.0020 * EXCHANGE_RATE * SCALE / 1000.0) as u32;
const GPT4_JPY_PER_INPUT_TOKEN: u32 = (0.03 * EXCHANGE_RATE * SCALE / 1000.0) as u32;
const GPT4_JPY_PER_OUTPUT_TOKEN: u32 = (0.06 * EXCHANGE_RATE * SCALE / 1000.0) as u32;
const GPT4_TURBO_JPY_PER_INPUT_TOKEN: u32 = (0.01 * EXCHANGE_RATE * SCALE / 1000.0) as u32;
const GPT4_TURBO_JPY_PER_OUTPUT_TOKEN: u32 = (0.03 * EXCHANGE_RATE * SCALE / 1000.0) as u32;

pub fn usage_pricing(input_token: u32, output_token: u32, model: &str) -> f32 {
let (input_rate, output_rate) = match model {
"gpt-3.5-turbo" => (GPT3_5_JPY_PER_INPUT_TOKEN, GPT3_5_JPY_PER_OUTPUT_TOKEN),
"gpt-3.5-turbo-1106" => (
GPT3_5_TURBO_JPY_PER_INPUT_TOKEN,
GPT3_5_TURBO_JPY_PER_OUTPUT_TOKEN,
),
"gpt-4" => (GPT4_JPY_PER_INPUT_TOKEN, GPT4_JPY_PER_OUTPUT_TOKEN),
// NOTE: preview model. See also: https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo
"gpt-4-1106-preview" => (
Expand Down

0 comments on commit 40c5300

Please sign in to comment.