diff --git a/models.yaml b/models.yaml index 127ff8d..f44fa9a 100644 --- a/models.yaml +++ b/models.yaml @@ -195,6 +195,13 @@ # - https://ai.google.dev/api/rest/v1beta/models/streamGenerateContent - provider: gemini models: + - name: gemini-3.1-pro-preview + max_input_tokens: 1048576 + max_output_tokens: 65535 + input_price: 2 + output_price: 12 + supports_vision: true + supports_function_calling: true - name: gemini-2.5-flash max_input_tokens: 1048576 max_output_tokens: 65535 @@ -670,8 +677,7 @@ # - https://cloud.google.com/vertex-ai/generative-ai/docs/model-reference/gemini - provider: vertexai models: - - name: gemini-3-pro-preview - hipaa_safe: true + - name: gemini-3.1-pro-preview max_input_tokens: 1048576 max_output_tokens: 65536 input_price: 2 @@ -1198,7 +1204,6 @@ max_input_tokens: 1024 input_price: 0.07 - # Links: # - https://help.aliyun.com/zh/model-studio/getting-started/models # - https://help.aliyun.com/zh/model-studio/developer-reference/use-qwen-by-calling-api @@ -1881,7 +1886,7 @@ input_price: 0.3 output_price: 1.5 supports_function_calling: true - - name: qwen/qwen3-coder # Qwen3 Coder 480B A35B + - name: qwen/qwen3-coder  # Qwen3 Coder 480B A35B max_input_tokens: 262144 input_price: 0.22 output_price: 0.95 @@ -2361,4 +2366,4 @@ - name: rerank-2-lite type: reranker max_input_tokens: 8000 - input_price: 0.02 \ No newline at end of file + input_price: 0.02