-
Notifications
You must be signed in to change notification settings - Fork 0
AQL_OOP_EXTENSION_PROPOSAL
Datum: 22. Dezember 2025
Version: v1.3.1 Proposal
Status: 📋 Vorschlag zur Diskussion
Autor: AI Code Review Agent
Dieses Dokument schlägt Erweiterungen der AQL (Advanced Query Language) vor, die auf objektorientierten Prinzipien (OOP) und Best Practices basieren. Die Vorschläge berücksichtigen insbesondere die neuen LLM-Erweiterungen (llama und llama.cpp vision) in v1.3.0 und zielen darauf ab, die Sprache wartbarer, wiederverwendbarer und ausdrucksstärker zu machen.
- Modularität: Namespace-System für bessere Code-Organisation
- Wiederverwendbarkeit: User-defined Functions (UDFs) und Typen
- Komposition: Method Chaining und Fluent Interfaces
- Vision-Integration: Erweiterte Multimodal-Operationen
- Typsicherheit: Stärkere Typsysteme für LLM-Operationen
Aktuell existiert nur ein flacher Namespace für alle Funktionen. Bei wachsender Funktionalität (LLM, Vision, Graph, Geo, etc.) wird dies unübersichtlich und fehleranfällig.
-- Namespace-Definition
NAMESPACE llm.vision;
-- Function in Namespace
FUNCTION analyze_image(img_path: String) -> Object {
LET result = LLM VISION ANALYZE img_path
USING MODEL 'llava-7b'
OPTIONS { detail: 'high' }
RETURN result
}
-- Verwendung mit qualifiziertem Namen
LET analysis = llm.vision::analyze_image('/images/drone_photo.jpg');
-- Import für kürzere Notation
IMPORT llm.vision.*;
LET analysis = analyze_image('/images/drone_photo.jpg');
namespace_decl ::=
"NAMESPACE" qualified_identifier ";"
qualified_identifier ::=
identifier ("." identifier)*
import_stmt ::=
"IMPORT" qualified_identifier ("." "*")? ";"
qualified_function_call ::=
qualified_identifier "::" function_name "(" argument_list? ")"

- ✅ Bessere Code-Organisation
- ✅ Vermeidung von Namenskonflikten
- ✅ Klare Zuordnung von Funktionalität
- ✅ Ermöglicht Plugin-Erweiterungen
Komplexe Datenstrukturen werden als generische Objects dargestellt, ohne Typsicherheit oder Validierung.
-- Type Definition
TYPE VisionAnalysis {
objects: Array<DetectedObject>,
scene: String,
confidence: Float,
embeddings: Array<Float>
}
TYPE DetectedObject {
label: String,
confidence: Float,
bbox: BoundingBox,
attributes: Object? -- Optional field
}
TYPE BoundingBox {
x: Int,
y: Int,
width: Int,
height: Int
}
-- Usage
FUNCTION process_image(path: String) -> VisionAnalysis {
LET raw_result = LLM VISION ANALYZE path
-- Type-safe construction
RETURN VisionAnalysis {
objects: raw_result.detections MAP (d ->
DetectedObject {
label: d.label,
confidence: d.score,
bbox: BoundingBox(d.box),
attributes: d.attrs
}
),
scene: raw_result.scene_type,
confidence: raw_result.overall_confidence,
embeddings: raw_result.embedding
}
}
type_definition ::=
"TYPE" identifier "{" field_list "}"
field_list ::=
field_decl ("," field_decl)*
field_decl ::=
identifier ":" type_spec ("?")? -- ? marks optional
type_spec ::=
primitive_type
| "Array" "<" type_spec ">"
| "Map" "<" type_spec "," type_spec ">"
| qualified_identifier -- User-defined type- ✅ Typsicherheit zur Compile-Zeit
- ✅ Bessere Dokumentation
- ✅ IDE-Unterstützung (Autocomplete)
- ✅ Frühe Fehlererkennung
Keine Möglichkeit, wiederverwendbare Funktionen zu definieren. Code-Duplikation bei komplexen LLM-Workflows.
-- Simple Function
FUNCTION calculate_similarity(vec1: Array<Float>, vec2: Array<Float>) -> Float {
RETURN COSINE_SIMILARITY(vec1, vec2)
}
-- Function with Default Parameters
FUNCTION generate_summary(
text: String,
model: String = 'mistral-7b',
max_tokens: Int = 150
) -> String {
RETURN LLM INFER CONCAT('Summarize: ', text)
USING MODEL model
OPTIONS { max_tokens: max_tokens }
}
-- Recursive Function
FUNCTION traverse_graph_recursive(
start_id: String,
depth: Int = 3
) -> Array<Object> {
IF depth <= 0 THEN
RETURN []
ENDIF
LET neighbors = (
FOR v IN 1..1 OUTBOUND start_id edges
RETURN v
)
LET recursive_results = (
FOR n IN neighbors
RETURN traverse_graph_recursive(n._id, depth - 1)
)
RETURN UNION(neighbors, FLATTEN(recursive_results))
}
function_definition ::=
"FUNCTION" identifier "(" parameter_list? ")" "->" type_spec
"{" function_body "}"
parameter_list ::=
parameter ("," parameter)*
parameter ::=
identifier ":" type_spec ("=" expression)? -- Default value
function_body ::=
statement_list

- ✅ Code-Wiederverwendung
- ✅ Reduzierung von Duplikation
- ✅ Bessere Testbarkeit
- ✅ Modulare Entwicklung
Verschachtelte Funktionsaufrufe sind schwer lesbar, besonders bei komplexen LLM-Pipelines.
-- Traditional nested style (hard to read)
LET result = UPPER(TRIM(SUBSTRING(doc.title, 0, 50)));
-- Pipeline style (readable, left-to-right)
LET result = doc.title
|> SUBSTRING(_, 0, 50)
|> TRIM(_)
|> UPPER(_);
-- LLM Pipeline Example
LET analysis = image_path
|> LLM VISION ANALYZE _
USING MODEL 'llava-7b'
|> _.description
|> LLM INFER CONCAT('Translate to German: ', _)
USING MODEL 'mistral-7b'
|> TRIM(_);
-- Complex RAG Pipeline
LET answer = user_query
|> LLM EMBED _ USING MODEL 'e5-base'
|> VECTOR_SEARCH(documents, _, 10)
|> MAP(_, d -> d.content)
|> JOIN(_, '\n\n')
|> LLM RAG user_query FROM CONTEXT _
WITH LORA 'domain-qa';
pipeline_expr ::=
expression ("|>" pipeline_stage)+
pipeline_stage ::=
function_call_with_placeholder
| llm_operation_with_placeholder
-- '_' represents the piped value
placeholder ::= "_"

- ✅ Verbesserte Lesbarkeit
- ✅ Intuitiver Datenfluss
- ✅ Weniger Verschachtelung
- ✅ Funktionale Programmierung
Aktuelle LLM VISION-Syntax ist zu generisch. Benötigt spezifischere Operationen für Vision Tasks.
-- 5.1 Image Analysis with structured output
LLM VISION ANALYZE 'image.jpg'
USING MODEL 'llava-7b'
DETECT [
objects,
text,
faces,
landmarks
]
OPTIONS {
detail: 'high',
language: 'de'
}
RETURN AS VisionAnalysisResult;
-- 5.2 Batch Image Processing
LLM VISION BATCH ANALYZE
FOR image IN images
DETECT objects
USING MODEL 'llava-7b'
RETURN {
image_id: image._id,
analysis: _.objects,
timestamp: DATE_NOW()
};
-- 5.3 Image-to-Image Operations
LLM VISION TRANSFORM 'input.jpg'
USING MODEL 'stable-diffusion'
OPERATIONS [
{ type: 'enhance', strength: 0.8 },
{ type: 'style_transfer', style: 'artistic' }
]
OUTPUT TO 'output.jpg';
-- 5.4 Visual Question Answering (VQA)
LLM VISION QUESTION 'What objects are in this image?'
ABOUT IMAGE 'photo.jpg'
USING MODEL 'llava-7b'
OPTIONS { format: 'structured' };
-- 5.5 Image Comparison
LLM VISION COMPARE
IMAGES ['img1.jpg', 'img2.jpg']
USING MODEL 'clip'
RETURN {
similarity: _.score,
differences: _.diff_areas
};
-- 5.6 Multimodal RAG with Images
LLM VISION RAG 'Explain this medical scan'
FROM COLLECTION medical_images
WHERE image.modality == 'CT'
WITH IMAGE 'patient_scan.jpg'
TOP 5
USING MODEL 'llava-med'
WITH LORA 'radiology-qa';
llm_vision_stmt ::=
llm_vision_analyze
| llm_vision_batch
| llm_vision_transform
| llm_vision_question
| llm_vision_compare
| llm_vision_rag
llm_vision_analyze ::=
"LLM" "VISION" "ANALYZE" string_literal
"USING" "MODEL" string_literal
("DETECT" "[" detection_list "]")?
("OPTIONS" object_literal)?
("RETURN" "AS" type_spec)?
detection_list ::=
detection_type ("," detection_type)*
detection_type ::=
"objects" | "text" | "faces" | "landmarks" |
"emotions" | "brands" | "celebrities"
llm_vision_question ::=
"LLM" "VISION" "QUESTION" string_literal
"ABOUT" "IMAGE" string_literal
"USING" "MODEL" string_literal
("OPTIONS" object_literal)?
llm_vision_compare ::=
"LLM" "VISION" "COMPARE"
"IMAGES" array_literal
"USING" "MODEL" string_literal
("OPTIONS" object_literal)?

- ✅ Spezialisierte Vision-Operations
- ✅ Strukturierte Ausgaben
- ✅ Batch-Verarbeitung
- ✅ Multimodale Integration
Keine strukturierte Fehlerbehandlung. LLM-Operationen können fehlschlagen.
-- Try-Catch Block
TRY {
LET result = LLM INFER 'Complex prompt'
USING MODEL 'large-model'
OPTIONS { max_tokens: 1000 }
RETURN result
}
CATCH (error) {
CASE error.type
WHEN 'LLM_MODEL_NOT_FOUND' THEN
RETURN LLM INFER 'Complex prompt'
USING MODEL 'fallback-model'
WHEN 'LLM_QUEUE_FULL' THEN
WAIT(5000) -- Wait 5 seconds
RETRY
WHEN 'LLM_TIMEOUT' THEN
RETURN { error: 'Timeout', message: error.message }
ELSE
THROW error -- Re-throw unknown errors
END
}
-- Optional Chaining (Safe Navigation & Null Coalescing)
LET value = risky_operation()?.result?.nested?.value ?? 'default';
-- Error Propagation
FUNCTION safe_llm_call(prompt: String) -> Result<String, Error> {
TRY {
RETURN Ok(LLM INFER prompt)
} CATCH (e) {
RETURN Err(e)
}
}
try_catch_stmt ::=
"TRY" "{" statement_list "}"
"CATCH" "(" identifier ")" "{" catch_body "}"
catch_body ::=
case_stmt | statement_list
case_stmt ::=
"CASE" expression
("WHEN" expression "THEN" statement_list)+
("ELSE" statement_list)?
"END"
optional_chaining ::=
expression ("?." identifier)*
("??" expression)? -- Null coalescing

Komplexe LLM-Workflows erfordern State Management und wiederverwendbare Komponenten.
-- Class Definition
CLASS MultimodalRAGPipeline {
-- Properties
PRIVATE collection: String;
PRIVATE model: String;
PRIVATE lora: String?;
-- Constructor
CONSTRUCTOR(collection: String, model: String = 'llava-7b') {
THIS.collection = collection;
THIS.model = model;
}
-- Methods
PUBLIC METHOD setLoRA(lora_id: String) -> SELF {
THIS.lora = lora_id;
RETURN THIS; -- Enable chaining
}
PUBLIC METHOD analyzeImage(img_path: String) -> VisionAnalysis {
RETURN LLM VISION ANALYZE img_path
USING MODEL THIS.model
WITH LORA THIS.lora;
}
PUBLIC METHOD searchSimilar(embedding: Array<Float>, top_k: Int = 5) -> Array<Object> {
FOR doc IN THIS.collection
LET similarity = COSINE_SIMILARITY(doc.embedding, embedding)
FILTER similarity > 0.7
SORT similarity DESC
LIMIT top_k
RETURN doc;
}
PUBLIC METHOD executeRAG(query: String, image: String?) -> String {
LET context = IF image THEN
LET vision_result = THIS.analyzeImage(image);
LET similar = THIS.searchSimilar(vision_result.embeddings, 5);
CONCAT_SEPARATOR('\n', similar[*].content)
ELSE
LET query_embedding = LLM EMBED query;
LET similar = THIS.searchSimilar(query_embedding, 5);
CONCAT_SEPARATOR('\n', similar[*].content)
ENDIF;
RETURN LLM INFER CONCAT(
'Context: ', context, '\n\n',
'Question: ', query
)
USING MODEL THIS.model
WITH LORA THIS.lora;
}
}
-- Usage
LET pipeline = NEW MultimodalRAGPipeline('documents', 'llava-7b')
.setLoRA('medical-qa');
LET answer = pipeline.executeRAG(
'What is visible in this scan?',
'patient_scan.jpg'
);
class_definition ::=
"CLASS" identifier "{" class_body "}"
class_body ::=
(property_decl | constructor_decl | method_decl)*
property_decl ::=
("PUBLIC" | "PRIVATE") identifier ":" type_spec ";"
constructor_decl ::=
"CONSTRUCTOR" "(" parameter_list? ")" "{" statement_list "}"
method_decl ::=
("PUBLIC" | "PRIVATE") "METHOD" identifier
"(" parameter_list? ")" "->" type_spec
"{" statement_list "}"
object_instantiation ::=
"NEW" qualified_identifier "(" argument_list? ")"
method_call ::=
expression "." identifier "(" argument_list? ")"
this_reference ::= "THIS"
self_reference ::= "SELF" -- For method chaining

LLM-Antworten sind oft unstrukturiert. Parsing und Validierung ist umständlich.
-- Match Expression
LET result = LLM INFER 'Classify this text: ' + doc.content
USING MODEL 'mistral-7b';
LET classification = MATCH result
WHEN /^Category:\s*(\w+)/ THEN $1
WHEN /^Type:\s*(\w+)/ THEN $1
ELSE 'unknown'
END;
-- Structural Pattern Matching
LET analysis = LLM VISION ANALYZE 'image.jpg';
LET action = MATCH analysis
WHEN { objects: [{ label: 'person', confidence: > 0.9 }, ...rest] } THEN
'High confidence person detected'
WHEN { objects: [...objects], scene: 'outdoor' } THEN
'Outdoor scene with ' + LENGTH(objects) + ' objects'
WHEN { objects: [] } THEN
'No objects detected'
ELSE
'Unknown pattern'
END;
-- Type-based Pattern Matching
FUNCTION handle_llm_result(result: Any) -> String {
MATCH result
WHEN String s THEN 'Got string: ' + s
WHEN Array<Object> arr THEN 'Got ' + LENGTH(arr) + ' objects'
WHEN VisionAnalysis v THEN 'Got vision analysis with ' + LENGTH(v.objects) + ' detections'
ELSE 'Unknown type'
END
}
match_expr ::=
"MATCH" expression
("WHEN" pattern "THEN" expression)+
("ELSE" expression)?
"END"
pattern ::=
regex_pattern
| structural_pattern
| type_pattern
| guard_pattern
regex_pattern ::=
"/" regex_body "/" ("THEN" capture_reference)?
structural_pattern ::=
object_pattern | array_pattern
type_pattern ::=
type_spec identifier
guard_pattern ::=
pattern ("WHERE" | "IF") expression

LLM-Inferenzen sind langsam. Blockierende Calls verschwenden Zeit bei parallelen Operationen.
-- Async Function
ASYNC FUNCTION analyze_documents_parallel(doc_ids: Array<String>) -> Array<String> {
LET tasks = FOR doc_id IN doc_ids
RETURN ASYNC {
LET doc = DOCUMENT('documents', doc_id);
RETURN LLM INFER doc.content
USING MODEL 'mistral-7b'
WITH LORA 'summarization';
};
-- Wait for all tasks
LET results = AWAIT ALL tasks;
RETURN results;
}
-- Async with timeout
LET result = AWAIT TIMEOUT(5000) {
LLM INFER 'Complex analysis'
USING MODEL 'large-model'
} OR 'Timeout - using cache';
-- Parallel execution with limit
LET results = AWAIT PARALLEL LIMIT 5 {
FOR doc IN documents
RETURN LLM EMBED doc.content
};
async_function ::=
"ASYNC" function_definition
async_block ::=
"ASYNC" "{" statement_list "}"
await_expr ::=
"AWAIT" expression
| "AWAIT" "ALL" array_expr
| "AWAIT" "TIMEOUT" "(" integer ")" expression
parallel_expr ::=
"AWAIT" "PARALLEL" ("LIMIT" integer)? expression

Repetitive Patterns bei LLM-Aufrufen. Boilerplate Code.
-- Macro Definition
MACRO rag_query(collection, query, model, lora) {
LET embedding = LLM EMBED $query USING MODEL $model;
LET context = (
FOR doc IN $collection
LET sim = COSINE_SIMILARITY(doc.embedding, embedding)
FILTER sim > 0.7
SORT sim DESC
LIMIT 5
RETURN doc.content
);
RETURN LLM INFER CONCAT(
'Context: ', JOIN(context, '\n\n'),
'\n\nQuestion: ', $query
)
USING MODEL $model
WITH LORA $lora;
}
-- Usage
LET answer = rag_query!(
documents,
'What is ThemisDB?',
'mistral-7b',
'tech-qa'
);
-- Conditional Compilation
#IF FEATURE_VISION_ENABLED
LET vision_result = LLM VISION ANALYZE image;
#ELSE
LET vision_result = { error: 'Vision not available' };
#ENDIF
macro_definition ::=
"MACRO" identifier "(" parameter_list? ")" "{" macro_body "}"
macro_body ::=
(statement | macro_variable_ref)*
macro_variable_ref ::=
"$" identifier
macro_invocation ::=
identifier "!" "(" argument_list? ")"
conditional_compilation ::=
"#IF" condition
statement_list
("#ELSE" statement_list)?
"#ENDIF"

-- 1. Verwende Namespaces für Gruppierung
NAMESPACE llm.medical;
-- 2. Definiere klare Typen
TYPE PatientScan {
scan_id: String,
modality: String,
image_path: String,
metadata: ScanMetadata
}
-- 3. Dokumentiere Funktionen
/**
* Analyzes a medical scan using LLM vision.
*
* @param scan - The patient scan to analyze
* @param model - Vision model to use (default: 'llava-med')
* @return Analysis result with findings
*/
FUNCTION analyze_scan(
scan: PatientScan,
model: String = 'llava-med'
) -> ScanAnalysis {
-- Implementation
}
-- 4. Behandle Fehler explizit
TRY {
LET result = risky_operation();
} CATCH (e) {
LOG_ERROR('Operation failed', e);
RETURN default_value;
}
-- 5. Verwende Pipeline für Lesbarkeit
LET result = input
|> validate(_)
|> transform(_)
|> enrich_with_llm(_)
|> store(_);
-- 6. Vermeide Code-Duplikation mit UDFs
FUNCTION standard_rag(query: String, collection: String) -> String {
-- Reusable RAG implementation
}
-- 1. Batch LLM-Calls
LET summaries = AWAIT PARALLEL LIMIT 10 {
FOR doc IN documents
RETURN LLM INFER doc.content
};
-- 2. Cache häufige Queries
LET cached_embedding = CACHE_GET('user_query_' + query_hash)
?? LLM EMBED query;
-- 3. Verwende LoRA für Domain-Anpassung
LET result = LLM INFER prompt
WITH LORA 'domain-specific' -- Smaller, faster
INSTEAD OF USING MODEL 'giant-model';
-- 4. Limitiere Token-Ausgabe
LET summary = LLM INFER doc
OPTIONS { max_tokens: 100 }; -- Prevents runaway costs
-- BEFORE (v1.3.0)
LET summary = (
LET embedding = LLM EMBED doc.content;
LET similar = (
FOR d IN documents
FILTER COSINE_SIMILARITY(d.embedding, embedding) > 0.8
LIMIT 5
RETURN d
);
RETURN LLM INFER CONCAT(
'Context: ', JOIN(similar[*].content, '\n'),
'\nQuestion: ', query
);
);
-- AFTER (v1.3.1) - Using new features
NAMESPACE app.rag;
TYPE RAGContext {
query: String,
documents: Array<Object>,
embedding: Array<Float>
}
CLASS RAGPipeline {
CONSTRUCTOR(collection: String) {
THIS.collection = collection;
}
PUBLIC METHOD execute(query: String) -> String {
RETURN query
|> LLM EMBED _
|> THIS.findSimilar(_, 5)
|> THIS.generateAnswer(query, _);
}
PRIVATE METHOD findSimilar(emb: Array<Float>, k: Int) -> Array<Object> {
FOR doc IN THIS.collection
FILTER COSINE_SIMILARITY(doc.embedding, emb) > 0.8
LIMIT k
RETURN doc;
}
PRIVATE METHOD generateAnswer(query: String, docs: Array<Object>) -> String {
RETURN LLM INFER CONCAT(
'Context: ', JOIN(docs[*].content, '\n'),
'\nQuestion: ', query
);
}
}
-- Usage
LET pipeline = NEW app.rag::RAGPipeline('documents');
LET answer = pipeline.execute('What is ThemisDB?');
| Feature | Priorität | Komplexität | Nutzen | Status |
|---|---|---|---|---|
| Namespace System | 🔴 Hoch | Mittel | Sehr hoch | 📋 Vorschlag |
| User-Defined Types | 🔴 Hoch | Hoch | Sehr hoch | 📋 Vorschlag |
| User-Defined Functions | 🔴 Hoch | Mittel | Sehr hoch | 📋 Vorschlag |
| Method Chaining | 🟡 Mittel | Niedrig | Hoch | 📋 Vorschlag |
| Vision Extensions | 🔴 Hoch | Mittel | Sehr hoch | 📋 Vorschlag |
| Error Handling | 🟡 Mittel | Mittel | Hoch | 📋 Vorschlag |
| Classes & Methods | 🟢 Niedrig | Sehr hoch | Mittel | 📋 Optional |
| Pattern Matching | 🟢 Niedrig | Hoch | Mittel | 📋 Optional |
| Async/Await | 🟡 Mittel | Sehr hoch | Hoch | 📋 Future |
| Macros | 🟢 Niedrig | Hoch | Niedrig | 📋 Optional |
- ✅ Namespace System
- ✅ User-Defined Functions
- ✅ Basic Type System
- ✅ Method Chaining (Pipeline Operator)
- ✅ Extended Vision Commands
- ✅ Try-Catch Error Handling
- ✅ Optional Chaining
- ✅ User-Defined Types (Full)
- ✅ Pattern Matching
- ✅ Classes (Basic)
- ✅ Async/Await
- ✅ Parallel Execution
- ✅ Advanced Optimization
-
Review & Diskussion
- Team-Review dieses Proposals
- Priorisierung der Features
- Feedback von Early Adopters
-
Prototyping
- Parser-Erweiterungen
- Einfache Implementierung für kritische Features
- Benchmark & Performance-Tests
-
Dokumentation
- Erweiterte EBNF-Grammatik
- API-Dokumentation
- Tutorial & Beispiele
-
Implementation
- Schrittweise Umsetzung nach Priorität
- Backward Compatibility sicherstellen
- Comprehensive Testing
Dieses Dokument ist ein lebender Vorschlag. Feedback und Verbesserungsvorschläge sind willkommen:
- 📧 GitHub Issues: Diskussion einzelner Features
- 📝 Pull Requests: Konkrete Verbesserungen
- 💬 Community Forum: Allgemeines Feedback
Copyright (c) 2025 ThemisDB. Alle Rechte vorbehalten.
ThemisDB v1.3.4 | GitHub | Documentation | Discussions | License
Last synced: January 02, 2026 | Commit: 6add659
Version: 1.3.0 | Stand: Dezember 2025
- Übersicht
- Home
- Dokumentations-Index
- Quick Reference
- Sachstandsbericht 2025
- Features
- Roadmap
- Ecosystem Overview
- Strategische Übersicht
- Geo/Relational Storage
- RocksDB Storage
- MVCC Design
- Transaktionen
- Time-Series
- Memory Tuning
- Chain of Thought Storage
- Query Engine & AQL
- AQL Syntax
- Explain & Profile
- Rekursive Pfadabfragen
- Temporale Graphen
- Zeitbereichs-Abfragen
- Semantischer Cache
- Hybrid Queries (Phase 1.5)
- AQL Hybrid Queries
- Hybrid Queries README
- Hybrid Query Benchmarks
- Subquery Quick Reference
- Subquery Implementation
- Content Pipeline
- Architektur-Details
- Ingestion
- JSON Ingestion Spec
- Enterprise Ingestion Interface
- Geo-Processor Design
- Image-Processor Design
- Hybrid Search Design
- Fulltext API
- Hybrid Fusion API
- Stemming
- Performance Tuning
- Migration Guide
- Future Work
- Pagination Benchmarks
- Enterprise README
- Scalability Features
- HTTP Client Pool
- Build Guide
- Implementation Status
- Final Report
- Integration Analysis
- Enterprise Strategy
- Verschlüsselungsstrategie
- Verschlüsselungsdeployment
- Spaltenverschlüsselung
- Encryption Next Steps
- Multi-Party Encryption
- Key Rotation Strategy
- Security Encryption Gap Analysis
- Audit Logging
- Audit & Retention
- Compliance Audit
- Compliance
- Extended Compliance Features
- Governance-Strategie
- Compliance-Integration
- Governance Usage
- Security/Compliance Review
- Threat Model
- Security Hardening Guide
- Security Audit Checklist
- Security Audit Report
- Security Implementation
- Development README
- Code Quality Pipeline
- Developers Guide
- Cost Models
- Todo Liste
- Tool Todo
- Core Feature Todo
- Priorities
- Implementation Status
- Roadmap
- Future Work
- Next Steps Analysis
- AQL LET Implementation
- Development Audit
- Sprint Summary (2025-11-17)
- WAL Archiving
- Search Gap Analysis
- Source Documentation Plan
- Changefeed README
- Changefeed CMake Patch
- Changefeed OpenAPI
- Changefeed OpenAPI Auth
- Changefeed SSE Examples
- Changefeed Test Harness
- Changefeed Tests
- Dokumentations-Inventar
- Documentation Summary
- Documentation TODO
- Documentation Gap Analysis
- Documentation Consolidation
- Documentation Final Status
- Documentation Phase 3
- Documentation Cleanup Validation
- API
- Authentication
- Cache
- CDC
- Content
- Geo
- Governance
- Index
- LLM
- Query
- Security
- Server
- Storage
- Time Series
- Transaction
- Utils
Vollständige Dokumentation: https://makr-code.github.io/ThemisDB/