02. 使用 Rust 学习 Agent 开发

Rust OpenAI 多模态示例(Chat Completions API)

use async_openai::{
    Client,
    types::chat::{
        ChatCompletionRequestMessageContentPartImage, ChatCompletionRequestMessageContentPartText,
        ChatCompletionRequestUserMessageArgs, CreateChatCompletionRequestArgs, ImageDetail,
        ImageUrl,
    },
};
use base64::{Engine as _, engine::general_purpose::STANDARD};

#[tokio::main]
async fn main() -> anyhow::Result<()> {
    // Load environment variables (e.g. OPENAI_API_KEY) from a .env file, if one exists.
    dotenvy::dotenv().ok();

    // Read the local image and embed it as a base64 data URL.
    let path = "playground/examples/assets/广州塔.jpeg";
    let bytes = std::fs::read(path)?;
    let data_url = format!("data:image/jpeg;base64,{}", STANDARD.encode(&bytes));

    let client = Client::new();

    // Build a single user message carrying a text part and an image part.
    let text_part = ChatCompletionRequestMessageContentPartText::from(
        "请描述下这张图片,这张图片所在位置是哪里呢?",
    );
    let image_part = ChatCompletionRequestMessageContentPartImage::from(ImageUrl {
        url: data_url,
        detail: Some(ImageDetail::High),
    });
    let user_message = ChatCompletionRequestUserMessageArgs::default()
        .content(vec![text_part.into(), image_part.into()])
        .build()?;

    let request = CreateChatCompletionRequestArgs::default()
        .model("gpt-5.5")
        .messages([user_message.into()])
        .build()?;

    // Send the request and dump the full response for inspection.
    let response = client.chat().create(request).await?;
    println!("{response:#?}");
    Ok(())
}

使用 Responses API

use async_openai::{
    Client,
    types::responses::{CreateResponseArgs, InputContent, InputImageArgs, InputMessageArgs},
};
use base64::{Engine as _, engine::general_purpose::STANDARD};

#[tokio::main]
async fn main() -> anyhow::Result<()> {
    // Load environment variables (e.g. OPENAI_API_KEY) from a .env file, if one exists.
    dotenvy::dotenv().ok();

    // Read the local image and embed it as a base64 data URL.
    let path = "playground/examples/assets/广州塔.jpeg";
    let bytes = std::fs::read(path)?;
    let data_url = format!("data:image/jpeg;base64,{}", STANDARD.encode(&bytes));

    let client = Client::new();

    // Compose the input message: a text question followed by the image.
    let question = InputContent::from("请描述下这张图片,这张图片所在位置是哪里呢?");
    let image = InputImageArgs::default().image_url(data_url).build()?;
    let message = InputMessageArgs::default()
        .content(vec![question, image.into()])
        .build()?;

    let request = CreateResponseArgs::default()
        .model("gpt-5.5")
        .input(message)
        .build()?;

    // Send the request and dump the full response for inspection.
    let response = client.responses().create(request).await?;
    println!("{response:#?}");
    Ok(())
}

3 个帖子 - 2 位参与者

阅读完整话题

来源:LinuxDo 最新话题(查看原文)