const axios = require('axios');
const fs = require('fs');
const path = require('path');

// Helper to convert a local image into a base64 string
async function toB64(imgPath) {
  const data = fs.readFileSync(path.resolve(imgPath));
  return Buffer.from(data).toString('base64');
}

const api_key = "YOUR API-KEY";
const url = "https://api.segmind.com/v1/qwen2-vl-72b-instruct";

const data = {
  "messages": [
    {
      "role": "user",
      "content": "tell me a joke on cats"
    },
    {
      "role": "assistant",
      "content": "here is a joke about cats..."
    },
    {
      "role": "user",
      "content": "now a joke on dogs"
    }
  ]
};

(async function() {
  try {
    const response = await axios.post(url, data, { headers: { 'x-api-key': api_key } });
    console.log(response.data);
  } catch (error) {
    // error.response is undefined for network-level failures, so guard before reading it
    console.error('Error:', error.response ? error.response.data : error.message);
  }
})();
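The toB64 helper above is defined but unused in the chat example. To send an image for visual understanding, a common pattern is to embed the base64 data as a data URI inside a structured content array. The sketch below (the "type"/"image_url" content parts and the askAboutImage wrapper) follows the widespread OpenAI-style convention and is an assumption, so verify the exact field names against the Segmind API reference:

// Hypothetical sketch of an image + text request; the structured content
// schema is assumed (OpenAI-style), not confirmed by this page.
async function askAboutImage(imgPath, question) {
  const b64 = await toB64(imgPath);
  const body = {
    messages: [
      {
        role: "user",
        content: [
          { type: "image_url", image_url: { url: `data:image/jpeg;base64,${b64}` } },
          { type: "text", text: question }
        ]
      }
    ]
  };
  const response = await axios.post(url, body, { headers: { 'x-api-key': api_key } });
  return response.data;
}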
messages: An array of objects, each containing a role and a content field.
role: Can be "user", "assistant", or "system".
content: A string containing the user's query or the assistant's response.
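For example, a "system" message can be placed first to steer the assistant's behavior across the rest of the conversation:

// A system message sets overall behavior; user/assistant messages carry the turns
const dataWithSystem = {
  messages: [
    { role: "system", content: "You are a witty assistant that answers in one sentence." },
    { role: "user", content: "tell me a joke on cats" }
  ]
};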
To keep track of your credit usage, inspect the response headers of each API call. The x-remaining-credits header indicates the number of credits remaining in your account; monitor this value to avoid disruptions in your API usage.
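With axios, response headers are available on response.headers (axios normalizes header names to lower case), so the balance can be checked after each call:

const response = await axios.post(url, data, { headers: { 'x-api-key': api_key } });
// Header names are lower-cased by axios
console.log('Remaining credits:', response.headers['x-remaining-credits']);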
Qwen2-VL-72B-Instruct is an advanced image-text-to-text model designed for a wide range of visual understanding and reasoning tasks. It is a significant upgrade over the previous Qwen-VL, incorporating several key enhancements:
Superior Image Understanding: Qwen2-VL achieves state-of-the-art performance on various visual understanding benchmarks including MathVista, DocVQA, RealWorldQA, and MTVQA. It demonstrates strong capabilities in processing images with different resolutions and aspect ratios.
Agent Capabilities: Qwen2-VL can be integrated with devices like mobile phones and robots for automatic operation based on visual environment and text instructions, demonstrating complex reasoning and decision-making skills.
Multilingual Support: Beyond English and Chinese, the model supports understanding text within images in many languages, including most European languages, Japanese, Korean, Arabic, and Vietnamese.
Dynamic Resolution Handling: Qwen2-VL can handle arbitrary image resolutions, mapping them into a dynamic number of visual tokens for a more human-like visual processing experience.
Advanced Positional Embedding: The model uses Multimodal Rotary Position Embedding (M-ROPE) to capture 1D textual, 2D visual, and 3D video positional information, enhancing its multimodal processing capabilities.
Model Architecture: The model employs a large-scale transformer architecture with 72 billion parameters.
Resolution Flexibility: The model is able to process a range of image resolutions, and its computational requirements can be adjusted by setting minimum and maximum pixel counts to optimize performance for specific hardware. Images can be resized to a specific width and height.
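As a minimal sketch, an image-processing library such as sharp (an assumption; this page does not prescribe a library) can resize an image to a target width and height before it is base64-encoded and sent:

// Minimal sketch using sharp (npm install sharp); the library choice and
// the target dimensions are illustrative assumptions, not model requirements.
const sharp = require('sharp');

async function toB64Resized(imgPath, width, height) {
  const buf = await sharp(path.resolve(imgPath))
    .resize(width, height) // resize to the target dimensions
    .jpeg()                // re-encode as JPEG
    .toBuffer();
  return buf.toString('base64');
}

// e.g. const b64 = await toB64Resized('./photo.png', 1024, 768);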
The model also has known limitations:
It has limited ability to recognize specific individuals or intellectual property.
It may struggle with complex, multi-step instructions.
Counting accuracy is low in complex scenes.
Spatial reasoning, especially in 3D space, requires further improvement.