Incredible Python SDK

The official Python SDK for the Incredible API. It provides an Anthropic-compatible messages interface along with clients for all other Incredible API endpoints.
🔑 Get your Incredible API key: generate an API key to start using the SDK.

Installation

pip install incredible-python
The SDK requires Python 3.8 or higher.

Quick Start

from incredible_python import Incredible

client = Incredible(api_key="YOUR_API_KEY")

# Simple chat completion
response = client.messages.create(
    model="small-1",
    max_tokens=150,
    messages=[{"role": "user", "content": "Give me 3 productivity tips."}],
)

print(response.content[0]['text'])
print("Token usage:", response.token_usage)

Configuration

The client can be configured via constructor arguments or environment variables:
from incredible_python import Incredible

client = Incredible(
    api_key="your-api-key",  # or set INCREDIBLE_API_KEY env var
    base_url="https://api.incredible.one",  # or set INCREDIBLE_BASE_URL
    timeout=600,  # Request timeout in seconds
    max_retries=2,  # Number of retry attempts
)
Setting        Environment Variable     Default
api_key        INCREDIBLE_API_KEY       (optional)
base_url       INCREDIBLE_BASE_URL      https://api.incredible.one
timeout        -                        600 seconds
max_retries    -                        2
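
You can also rely on environment variables alone and construct the client with no arguments. A minimal sketch (in practice, export the variables in your shell or deployment environment rather than setting them in code):
import os

from incredible_python import Incredible

os.environ["INCREDIBLE_API_KEY"] = "your-api-key"
os.environ["INCREDIBLE_BASE_URL"] = "https://api.incredible.one"

client = Incredible()  # reads INCREDIBLE_API_KEY and INCREDIBLE_BASE_URL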

Callable Resources

All resources support both direct callable syntax and explicit .create() methods:
# Direct callable (more concise)
response = client.messages(
    model="small-1",
    messages=[{"role": "user", "content": "Hello"}],
    max_tokens=100
)

# Explicit method (more familiar)
response = client.messages.create(
    model="small-1",
    messages=[{"role": "user", "content": "Hello"}],
    max_tokens=100
)
Resources supporting callable syntax:
  • client.messages() / client.messages.create()
  • client.completions() / client.completions.create()
  • client.answer()
  • client.conversation()
  • client.agent()
  • client.web_search()
  • client.deep_research()
  • client.generate_image()
  • client.generate_video()

Messages API

Chat completions with Anthropic-compatible interface:
response = client.messages.create(
    model="small-1",
    max_tokens=256,
    temperature=0.7,
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Explain quantum computing in simple terms."},
    ],
)

print(response.content[0]['text'])
print(f"Stop reason: {response.stop_reason}")
print(f"Usage: {response.token_usage}")

Function Calling

from incredible_python import Incredible, helpers

client = Incredible(api_key="YOUR_API_KEY")

functions = [
    {
        "name": "calculate",
        "description": "Perform basic math operations",
        "parameters": {
            "type": "object",
            "properties": {
                "operation": {"type": "string", "enum": ["add", "subtract", "multiply", "divide"]},
                "a": {"type": "number"},
                "b": {"type": "number"},
            },
            "required": ["operation", "a", "b"],
        },
    },
]

# Initial request
response = client.messages.create(
    model="small-1",
    max_tokens=256,
    messages=[{"role": "user", "content": "What is 127 + 349?"}],
    functions=functions,
)

# Execute tool calls if any
if response.tool_calls:
    plan = helpers.build_tool_execution_plan(response.raw)
    if plan and not plan.is_empty():
        # Define your tool registry
        registry = {
            "calculate": lambda operation, a, b: {
                "add": a + b,
                "subtract": a - b,
                "multiply": a * b,
                "divide": a / b,
            }[operation],
        }
        
        # Execute tools
        results = helpers.execute_plan(plan, registry=registry)
        
        # Build follow-up messages
        follow_up = helpers.build_follow_up_messages(
            [{"role": "user", "content": "What is 127 + 349?"}],
            plan,
            results
        )
        
        # Get final response
        final = client.messages.create(
            model="small-1",
            max_tokens=256,
            messages=follow_up,
            functions=functions,
        )

        print(final.content[0]['text'])

Streaming

stream = client.messages.stream(
    model="small-1",
    max_tokens=256,
    messages=[{"role": "user", "content": "Write a haiku about coding."}],
)

for event in stream.iter_lines():
    if event.get("content", {}).get("type") == "content_chunk":
        print(event["content"]["content"], end="", flush=True)

Text Completions

Simple text completion (OpenAI-style):
response = client.completions.create(
    model="small-1",
    prompt="The capital of France is",
    max_tokens=10
)

print(response.choices[0].text)

Answer API

Simple question-answering with optional structured output:
# Simple answer
response = client.answer(
    query="What is the meaning of life?"
)

print(response.answer)
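
For structured output, the sketch below shows one possible shape; the output_schema parameter and the structured_output attribute are assumptions for illustration only, so check the API reference for the exact names:
# Structured answer (hypothetical parameter and attribute names)
response = client.answer(
    query="List three facts about Mars.",
    output_schema={
        "type": "object",
        "properties": {
            "facts": {"type": "array", "items": {"type": "string"}},
        },
        "required": ["facts"],
    },
)

print(response.structured_output)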

Conversation API

Multi-turn conversations with automatic context management:
response = client.conversation(
    messages=[
        {"role": "user", "content": "Hello!"},
        {"role": "assistant", "content": "Hi there! How can I help you today?"},
        {"role": "user", "content": "Tell me a joke"}
    ]
)

print(response.response)
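
Since each call receives the full message list, a common pattern is to append the assistant's reply before sending the next turn. A minimal sketch using only the fields shown above:
messages = [{"role": "user", "content": "Tell me a joke"}]

first = client.conversation(messages=messages)
print(first.response)

# Carry the context forward by appending the reply and the next user turn
messages.append({"role": "assistant", "content": first.response})
messages.append({"role": "user", "content": "Explain why that joke is funny."})

second = client.conversation(messages=messages)
print(second.response)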

Agent API

Autonomous agents with tool calling:
tools = [
    {
        "name": "calculator",
        "description": "Perform basic arithmetic calculations",
        "input_schema": {
            "type": "object",
            "properties": {
                "expression": {"type": "string", "description": "Math expression to evaluate"}
            },
            "required": ["expression"]
        }
    }
]

response = client.agent(
    messages=[{"role": "user", "content": "What is 25 * 4?"}],
    tools=tools
)

# Check for tool calls
if response.tool_calls:
    for call in response.tool_calls:
        print(f"Tool: {call.name}, Inputs: {call.inputs}")
else:
    print(response.content)
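
The SDK returns tool calls but does not execute them, so dispatching to local handlers is up to you. A minimal sketch, assuming call.inputs is a plain dict matching the input_schema above (the calculator handler itself is illustrative):
import ast
import operator

def run_calculator(expression: str) -> float:
    # Evaluate a basic arithmetic expression without using eval()
    ops = {
        ast.Add: operator.add,
        ast.Sub: operator.sub,
        ast.Mult: operator.mul,
        ast.Div: operator.truediv,
    }

    def _eval(node):
        if isinstance(node, ast.Expression):
            return _eval(node.body)
        if isinstance(node, ast.BinOp) and type(node.op) in ops:
            return ops[type(node.op)](_eval(node.left), _eval(node.right))
        if isinstance(node, ast.Constant) and isinstance(node.value, (int, float)):
            return node.value
        raise ValueError("Unsupported expression")

    return _eval(ast.parse(expression, mode="eval"))

handlers = {"calculator": lambda inputs: run_calculator(inputs["expression"])}

if response.tool_calls:
    for call in response.tool_calls:
        result = handlers[call.name](call.inputs)
        print(f"{call.name} -> {result}")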

Web Search

Search the web for a query:
response = client.web_search(
    query="Python programming language",
    num_results=5
)

for result in response.results:
    print(f"{result.title}: {result.url}")

Deep Research

In-depth research with multiple searches and synthesis:
response = client.deep_research(
    instructions="Research the history of Python programming language"
)

print(response.output)

if response.citations:
    print(f"\nCitations: {len(response.citations)}")
    for citation in response.citations:
        print(f"  - {citation.title}: {citation.url}")

Image Generation

response = client.generate_image(
    prompt="A red circle",
    aspect_ratio="1:1"
)

print("Image URL:", response.image_url)
print("Seed:", response.seed)
Parameters:
  • prompt (required): Text description
  • aspect_ratio (optional): "1:1", "16:9", "9:16", "4:3", "3:4", "21:9", "9:21" (default: "16:9")
  • output_format (optional): "jpeg" or "png" (default: "jpeg")
  • seed (optional): Set a seed to reproduce the same style and composition
  • prompt_upsampling (optional): Expand the prompt with stylistic hints (default: False)
  • safety_tolerance (optional): Content moderation level 0-6 (default: 2)
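
A sketch combining the optional parameters listed above (the prompt and values are illustrative):
response = client.generate_image(
    prompt="A minimalist poster of a lighthouse at dusk",
    aspect_ratio="9:16",
    output_format="png",
    seed=42,                 # reuse the seed for a similar style and composition
    prompt_upsampling=True,  # expand the prompt with stylistic hints
    safety_tolerance=2,
)

print("Image URL:", response.image_url)
print("Seed:", response.seed)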

Video Generation

import base64

response = client.generate_video(
    prompt="Ocean waves",
    size="1280x720"
)

print(f"Video ID: {response.video_id}")
print(f"Duration: {response.duration}s")

# Extract base64 video data from data URI
video_base64 = response.video_url.split(",", 1)[1]
video_bytes = base64.b64decode(video_base64)

with open("generated_video.mp4", "wb") as f:
    f.write(video_bytes)
print("Video saved!")
Parameters:
  • prompt (required): Text description
  • size (optional): Video dimensions - "1280x720", "720x1280", "1920x1080", "1080x1920", "1024x1024" (default: "1280x720")

OCR

Extract text from images and PDFs:

Image OCR

import base64

# From local file (base64-encoded)
with open("receipt.png", "rb") as f:
    image_data = base64.b64encode(f.read()).decode("utf-8")

response = client.ocr.image(image=image_data)

print("Extracted text:", response.text)
print(f"Method: {response.method}")

PDF OCR

import base64

# From local file (base64-encoded)
with open("contract.pdf", "rb") as f:
    pdf_data = base64.b64encode(f.read()).decode("utf-8")

response = client.ocr.pdf(pdf=pdf_data)

print(f"Processed {response.pages_processed} of {response.total_pages} pages")
print(f"Method: {response.method}")
print("PDF text:", response.text[:500])

# Access per-page results
for page in response.pages:
    print(f"\n--- Page {page.page_number} ---")
    print(page.text[:200])

Files API

Upload and manage files:
# Upload file
with open("document.pdf", "rb") as f:
    response = client.files.upload(file=f)
    print(f"File ID: {response.file_id}")

# Upload from URL
response = client.files.upload_url(
    url="https://example.com/document.pdf"
)

# List files
files = client.files.list()
for file in files.data:
    print(f"{file.id}: {file.filename} ({file.size} bytes)")

# Get metadata
metadata = client.files.metadata(file_id="file_abc123")
print(f"Filename: {metadata.filename}, Size: {metadata.size}")

# Delete file
client.files.delete(file_id="file_abc123")

Integrations API

List, connect, and execute third-party integrations:
# List integrations
integrations = client.integrations.list()
for integration in integrations[:10]:
    print(f"{integration['id']}: {integration['name']}")

# Get integration details
details = client.integrations.retrieve("perplexity")
print(f"Features: {len(details['features'])}")

# Connect integration
connection = client.integrations.connect(
    "perplexity",
    user_id="user_123",
    api_key="perplexity-secret",
)

if hasattr(connection, 'requires_oauth') and connection.requires_oauth:
    print("OAuth required:", connection.redirect_url)
elif hasattr(connection, 'success'):
    print("Connected:", connection.success)

# Execute integration feature
feature_name = details["features"][0]["name"]  # e.g., "PERPLEXITYAI_PERPLEXITY_AI_SEARCH"
execution = client.integrations.execute(
    "perplexity",
    user_id="user_123",
    feature_name=feature_name,
    inputs={"query": "Latest AI news"},
)
print(execution)

Models API

List available models:
models = client.models.list()
print(f"Available models: {len(models['data'])}")
for model in models['data']:
    print(f"- {model['id']}")

Error Handling

The SDK provides typed exceptions for different error conditions:
from incredible_python import Incredible
from incredible_python._exceptions import (
    AuthenticationError,
    RateLimitError,
    NotFoundError,
    ValidationError,
    APIError,
)

client = Incredible(api_key="YOUR_API_KEY")

try:
    response = client.messages.create(
        model="small-1",
        messages=[{"role": "user", "content": "Hello"}],
        max_tokens=100
    )
except AuthenticationError as e:
    print(f"Authentication failed: {e}")
except RateLimitError as e:
    print(f"Rate limited. Retry after: {e.retry_after}s")
except NotFoundError as e:
    print(f"Resource not found: {e.message}")
except ValidationError as e:
    print(f"Invalid request: {e}")
except APIError as e:
    print(f"API error: {e}")

Helper Utilities

The SDK includes helper functions for tool execution workflows:
from incredible_python import helpers

# Build execution plan from response
plan = helpers.build_tool_execution_plan(response.raw)

# Execute plan with registry
results = helpers.execute_plan(plan, registry=your_tool_registry)

# Build follow-up messages
follow_up = helpers.build_follow_up_messages(original_messages, plan, results)

Examples

See the Incredible API Cookbook for complete examples and use cases.

Additional Resources