OpenCode配置CPA中转站,缺少上下文容量的参数,模型的上下文参数在哪里找?

OpenCode如果直接登录chatgpt的账号,会自动获取到每个模型的上下文容量,token超过某个阈值会自动触发压缩,但是如果通过CPA来调用,好像就缺少这个信息了,因为我只配置了模型名称。 { "cpa": { "options": { "baseURL": "https://cpa.xxx....
OpenCode配置CPA中转站,缺少上下文容量的参数,模型的上下文参数在哪里找?
OpenCode配置CPA中转站,缺少上下文容量的参数,模型的上下文参数在哪里找?

OpenCode如果直接登录chatgpt的账号,会自动获取到每个模型的上下文容量,token超过某个阈值会自动触发压缩,但是如果通过CPA来调用,好像就缺少这个信息了,因为我只配置了模型名称。

{
    "cpa": {
      "options": {
        "baseURL": "https://cpa.xxx.xxx/v1",
        "apiKey": "sk-xxxxxxxxxxxxx",
        "wire_api": "responses"
      },
      "models": {
        "glm-5": {
          "name": "GLM 5"
        },
        "gpt-5.2-codex": {
          "name": "GPT-5.2-Codex"
        },
        "gpt-5.2-codex-minimal": {
          "name": "GPT-5.2-Codex Minimal",
          "options": {
            "model": "gpt-5.2-codex(minimal)"
          }
        },
        "gpt-5.2-codex-low": {
          "name": "GPT-5.2-Codex Low",
          "options": {
            "model": "gpt-5.2-codex(low)"
          }
        },
        "gpt-5.2-codex-medium": {
          "name": "GPT-5.2-Codex Medium",
          "options": {
            "model": "gpt-5.2-codex(medium)"
          }
        },
        "gpt-5.2-codex-high": {
          "name": "GPT-5.2-Codex High",
          "options": {
            "model": "gpt-5.2-codex(high)"
          }
        },
        "gpt-5.2-codex-xhigh": {
          "name": "GPT-5.2-Codex XHigh",
          "options": {
            "model": "gpt-5.2-codex(xhigh)"
          }
        },
        "gpt-5.2-codex-auto": {
          "name": "GPT-5.2-Codex Auto",
          "options": {
            "model": "gpt-5.2-codex(auto)"
          }
        },
        "gpt-5.2-codex-none": {
          "name": "GPT-5.2-Codex None",
          "options": {
            "model": "gpt-5.2-codex(none)"
          }
        },
        "gpt-5.2": {
          "name": "GPT-5.2"
        },
        "gpt-5.2-minimal": {
          "name": "GPT-5.2 Minimal",
          "options": {
            "model": "gpt-5.2(minimal)"
          }
        },
        "gpt-5.2-low": {
          "name": "GPT-5.2 Low",
          "options": {
            "model": "gpt-5.2(low)"
          }
        },
        "gpt-5.2-medium": {
          "name": "GPT-5.2 Medium",
          "options": {
            "model": "gpt-5.2(medium)"
          }
        },
        "gpt-5.2-high": {
          "name": "GPT-5.2 High",
          "options": {
            "model": "gpt-5.2(high)"
          }
        },
        "gpt-5.2-xhigh": {
          "name": "GPT-5.2 XHigh",
          "options": {
            "model": "gpt-5.2(xhigh)"
          }
        },
        "gpt-5.2-auto": {
          "name": "GPT-5.2 Auto",
          "options": {
            "model": "gpt-5.2(auto)"
          }
        },
        "gpt-5.2-none": {
          "name": "GPT-5.2 None",
          "options": {
            "model": "gpt-5.2(none)"
          }
        },
        "gpt-5.3-codex": {
          "name": "GPT-5.3-Codex"
        },
        "gpt-5.3-codex-minimal": {
          "name": "GPT-5.3-Codex Minimal",
          "options": {
            "model": "gpt-5.3-codex(minimal)"
          }
        },
        "gpt-5.3-codex-low": {
          "name": "GPT-5.3-Codex Low",
          "options": {
            "model": "gpt-5.3-codex(low)"
          }
        },
        "gpt-5.3-codex-medium": {
          "name": "GPT-5.3-Codex Medium",
          "options": {
            "model": "gpt-5.3-codex(medium)"
          }
        },
        "gpt-5.3-codex-high": {
          "name": "GPT-5.3-Codex High",
          "options": {
            "model": "gpt-5.3-codex(high)"
          }
        },
        "gpt-5.3-codex-xhigh": {
          "name": "GPT-5.3-Codex XHigh",
          "options": {
            "model": "gpt-5.3-codex(xhigh)"
          }
        },
        "gpt-5.3-codex-auto": {
          "name": "GPT-5.3-Codex Auto",
          "options": {
            "model": "gpt-5.3-codex(auto)"
          }
        },
        "gpt-5.3-codex-none": {
          "name": "GPT-5.3-Codex None",
          "options": {
            "model": "gpt-5.3-codex(none)"
          }
        },
        "gpt-5.4": {
          "name": "GPT-5.4"
        },
        "gpt-5.4-minimal": {
          "name": "GPT-5.4 Minimal",
          "options": {
            "model": "gpt-5.4(minimal)"
          }
        },
        "gpt-5.4-low": {
          "name": "GPT-5.4 Low",
          "options": {
            "model": "gpt-5.4(low)"
          }
        },
        "gpt-5.4-medium": {
          "name": "GPT-5.4 Medium",
          "options": {
            "model": "gpt-5.4(medium)"
          }
        },
        "gpt-5.4-high": {
          "name": "GPT-5.4 High",
          "options": {
            "model": "gpt-5.4(high)"
          }
        },
        "gpt-5.4-xhigh": {
          "name": "GPT-5.4 XHigh",
          "options": {
            "model": "gpt-5.4(xhigh)"
          }
        },
        "gpt-5.4-auto": {
          "name": "GPT-5.4 Auto",
          "options": {
            "model": "gpt-5.4(auto)"
          }
        },
        "gpt-5.4-none": {
          "name": "GPT-5.4 None",
          "options": {
            "model": "gpt-5.4(none)"
          }
        },
        "gpt-5.5": {
          "name": "GPT-5.5"
        },
        "gemini-2.5-flash": {
          "name": "Gemini 2.5 Flash",
          "options": {
            "model": "gemini-2.5-flash"
          }
        },
        "gemini-2.5-flash-lite": {
          "name": "Gemini 2.5 Flash Lite",
          "options": {
            "model": "gemini-2.5-flash-lite"
          }
        },
        "gemini-2.5-pro": {
          "name": "Gemini 2.5 Pro",
          "options": {
            "model": "gemini-2.5-pro"
          }
        },
        "gemini-3-flash-preview": {
          "name": "Gemini 3 Flash",
          "options": {
            "model": "gemini-3-flash-preview"
          }
        },
        "gemini-3-pro-preview": {
          "name": "Gemini 3 Pro",
          "options": {
            "model": "gemini-3-pro-preview"
          }
        },
        "gemini-3.1-pro-preview": {
          "name": "Gemini 3.1 Pro",
          "options": {
            "model": "gemini-3.1-pro-preview"
          }
        },
        "gemini-3.1-flash-lite-preview": {
          "name": "Gemini 3.1 Flash Lite Preview",
          "options": {
            "model": "gemini-3.1-flash-lite-preview"
          }
        }
      }
    }
  }

我看了OpenCode配置文件的说明,好像是能配置模型上下文大小的,那么这个数据从哪里来比较准确呢?还是说根据公开资料自己填?

6 个帖子 - 4 位参与者

阅读完整话题

来源: linux.do查看原文