A cross-framework C++ client library for Ollama API with support for both text and vision models. Works with OpenFrameworks, Cinder, and can be extended to other C++ frameworks.
- Framework-agnostic base: Core HTTP communication works with any C++ project
- OpenFrameworks support: Native integration with `ofPixels`, `ofTexture`, and `ofImage`
- Cinder support: Native integration with Cinder's `Surface` and `Texture`
- Async & Sync APIs: Choose between callback-based or blocking calls
- Vision models: Send images for inference with multimodal models
- Text models: Send text prompts to chat models
- Windows support: Uses WinHTTP for reliable HTTP communication
- No external dependencies: Base64 encoding and JSON building included
#include <OllamaClient/OllamaClientOF.h>

// openFrameworks example: draw into an FBO, press SPACE to ask the vision
// model what it sees on the canvas.
class ofApp : public ofBaseApp {
    OllamaClientOF ollama;  // client with default host/port/models
    ofFbo drawingCanvas;    // off-screen canvas holding the drawing

    void setup() {
        // Allocate a 512x512 RGBA canvas and start from a white background.
        drawingCanvas.allocate(512, 512, GL_RGBA);
        drawingCanvas.begin();
        ofClear(255, 255, 255, 255);
        drawingCanvas.end();
    }

    void keyPressed(int key) {
        if (key != ' ') return;  // only the spacebar triggers analysis

        // Grab the canvas pixels and send them for asynchronous inference.
        ofPixels canvasPixels;
        drawingCanvas.readToPixels(canvasPixels);
        ollama.sendPixelsForInference(
            canvasPixels,
            "What do you see in this drawing?",
            [](const string& reply, void* /*userData*/) {
                ofLogNotice() << "AI says: " << reply;
            },
            this);
    }
};

#include <OllamaClient/OllamaClientCinder.h>
// Cinder example: synchronously ask the vision model about a Surface.
class MyApp : public ci::app::App {
    OllamaClientCinder ollama;  // client with default host/port/models

    // Blocks until the model responds, then logs the answer.
    void analyzeSurface(const ci::Surface& surf) {
        const string result = ollama.sendSurfaceForInferenceSync(
            surf,
            "What colors are in this image?");
        CI_LOG_I("Result: " << result);
    }
};

cd your-workspace/
git clone https://github.com/yourusername/ollama-client.git
cd your-project/
Add ../OllamaClient/include to your project's include paths, then use:
#include <OllamaClient/OllamaClientOF.h>

cd your-project/
git submodule add https://github.com/yourusername/ollama-client.git lib/OllamaClient
git submodule update --init --recursive
Add lib/OllamaClient/include to your project's include paths.
Copy include/OllamaClient/ headers and src/ implementation files into your project.
All framework-specific clients inherit from this base class.
// ---------------------------------------------------------------------------
// OllamaClientBase — constructor (all framework clients inherit these args)
// ---------------------------------------------------------------------------
OllamaClientBase(
    const string& host = "localhost",
    int port = 11434,
    const string& visionModel = "llava:7b",
    const string& chatModel = "llama3"
);

// Text prompts
// Async
void sendPrompt(const string& prompt, InferenceCallback callback, void* userData);
// Sync
string sendPromptSync(const string& prompt);

// Vision-model selection
void setVisionModel(const string& visionModel);
string getVisionModel();

// ---------------------------------------------------------------------------
// OllamaClientOF — image inference (openFrameworks)
// ---------------------------------------------------------------------------
// With ofPixels
void sendPixelsForInference(const ofPixels& pixels, const string& prompt,
                            InferenceCallback callback, void* userData);
string sendPixelsForInferenceSync(const ofPixels& pixels, const string& prompt);

// With ofTexture
void sendTextureForInference(const ofTexture& texture, const string& prompt,
                             InferenceCallback callback, void* userData);
string sendTextureForInferenceSync(const ofTexture& texture, const string& prompt);

// With ofImage
void sendImageForInference(const ofImage& image, const string& prompt,
                           InferenceCallback callback, void* userData);
string sendImageForInferenceSync(const ofImage& image, const string& prompt);

// Static helpers: encode images as base64 JPEG
static string textureToBase64Jpeg(const ofTexture& texture,
                                  ofImageQualityType quality = OF_IMAGE_QUALITY_HIGH);
static string pixelsToBase64Jpeg(const ofPixels& pixels,
                                 ofImageQualityType quality = OF_IMAGE_QUALITY_HIGH);

// ---------------------------------------------------------------------------
// OllamaClientCinder — image inference (Cinder)
// ---------------------------------------------------------------------------
// With ci::Surface
void sendSurfaceForInference(const ci::Surface& surface, const string& prompt,
                             InferenceCallback callback, void* userData);
string sendSurfaceForInferenceSync(const ci::Surface& surface, const string& prompt);

// With ci::gl::Texture
void sendTextureForInference(const ci::gl::Texture& texture, const string& prompt,
                             InferenceCallback callback, void* userData);
string sendTextureForInferenceSync(const ci::gl::Texture& texture, const string& prompt);

// openFrameworks example: describe the live camera feed every 5 seconds.
class ofApp : public ofBaseApp {
    OllamaClientOF ollama;
    ofVideoGrabber camera;
    float lastAnalysisTime;  // time (seconds) of the last inference request

    void setup() {
        camera.setup(640, 480);
        lastAnalysisTime = 0;
    }

    void update() {
        camera.update();
        // Analyze every 5 seconds
        if (ofGetElapsedTimef() - lastAnalysisTime > 5.0f) {
            ollama.sendPixelsForInference(
                camera.getPixels(),
                "Describe what you see",
                [](const string& result, void* userData) {
                    ofLogNotice() << "Camera: " << result;
                },
                this);
            lastAnalysisTime = ofGetElapsedTimef();
        }
    }
};

// Video example: send a frame for inference at most every 10 seconds.
void analyzeVideo() {
    ofVideoPlayer video;
    video.load("myvideo.mp4");
    video.play();
    OllamaClientOF ollama("localhost", 11434, "llava:7b");
    // Fix: the original example read/wrote lastAnalysisTime without ever
    // declaring it (the snippet could not compile). It must persist across
    // calls for the 10-second throttle to work, so declare it static.
    static float lastAnalysisTime = 0;
    // Analyze every 10 seconds
    if (ofGetElapsedTimef() - lastAnalysisTime > 10.0f) {
        ollama.sendPixelsForInference(
            video.getPixels(),
            "Describe what is happening in this video frame",
            [](const string& result, void* userData) {
                ofLogNotice() << "Frame description: " << result;
            },
            nullptr);
        lastAnalysisTime = ofGetElapsedTimef();
    }
}

ofImage img;
// Example: analyze a still image synchronously with the vision model.
img.load("photo.jpg");
OllamaClientOF ollama;
ollama.setVisionModel("llava:7b");
string result = ollama.sendImageForInferenceSync(
    img,
    "What objects are visible in this image? List them.");
cout << "Objects found: " << result << endl;

// Example: plain text generation with the chat model.
OllamaClientOF ollama("localhost", 11434, "llava:7b", "llama3");
string answer = ollama.sendPromptSync(
    "Explain quantum computing in simple terms");
cout << answer << endl;

// Use a different vision model
ollama.setVisionModel("granite3.2-vision");
// Or specify in constructor
OllamaClientOF ollama("localhost", 11434, "bakllava", "codellama");

- C++11 or later
- Windows: WinHTTP (included with Windows SDK)
- OpenFrameworks: 0.11.0 or later (for OF client)
- Cinder: 0.9.0 or later (for Cinder client)
- Ollama: Running locally or on a network server
- Download and install Ollama
- Pull a vision model:
  ollama pull llava:7b
  # or: ollama pull bakllava
  # or: ollama pull granite3.2-vision
- The Ollama server runs automatically on localhost:11434
OllamaClientBase (Framework-agnostic)
├── HTTP communication via WinHTTP
├── JSON payload building
├── Base64 encoding
└── Threading for async operations
OllamaClientOF (OpenFrameworks)
├── Inherits from OllamaClientBase
├── ofPixels, ofTexture, ofImage support
└── JPEG encoding via ofSaveImage
OllamaClientCinder (Cinder)
├── Inherits from OllamaClientBase
├── ci::Surface, ci::gl::Texture support
└── JPEG encoding via Cinder's writeImage
To add support for another framework, inherit from OllamaClientBase and implement:
// Skeleton for adding a new framework: subclass the base and override
// the image-conversion hook plus the two inference entry points.
class OllamaClientYourFramework : public OllamaClientBase {
protected:
    // Convert your framework's image type to base64 JPEG
    string convertImageToBase64Jpeg(const void* imageData,
                                    float jpegQuality = 0.8f) override;

    // Implement the pure virtual methods
    void sendImageForInference(const void* imageData, const string& prompt,
                               InferenceCallback callback, void* userData) override;
    string sendImageForInferenceSync(const void* imageData,
                                     const string& prompt) override;
};

MIT License - See LICENSE file for details
Contributions are welcome! Please feel free to submit pull requests or open issues.
Created for creative coding and AI experimentation with C++ frameworks.