
Commit 17c9112

Removed system prompt from vectorq config

1 parent b71e993

2 files changed: +2 −9 lines

vectorq/config.py (0 additions, 4 deletions)

@@ -1,5 +1,3 @@
-from typing import Optional
-
 from vectorq.inference_engine.inference_engine import InferenceEngine
 from vectorq.inference_engine.strategies.openai import OpenAIInferenceEngine
 from vectorq.vectorq_core.cache.embedding_engine import OpenAIEmbeddingEngine
@@ -31,11 +29,9 @@ def __init__(
         vector_db: VectorDB = HNSWLibVectorDB(),
         embedding_metadata_storage: EmbeddingMetadataStorage = InMemoryEmbeddingMetadataStorage(),
         eviction_policy: EvictionPolicy = LRUEvictionPolicy(),
-        system_prompt: Optional[str] = None,
     ):
         self.inference_engine = inference_engine
         self.embedding_engine = embedding_engine
         self.vector_db = vector_db
         self.eviction_policy = eviction_policy
         self.embedding_metadata_storage = embedding_metadata_storage
-        self.system_prompt = system_prompt
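After this commit, VectorQConfig no longer accepts a system prompt; it is configured on the inference engine instead. A minimal sketch of the new construction path, assuming OpenAIInferenceEngine accepts a system_prompt keyword (the diff only confirms the prompt now lives on the InferenceEngine, not this exact parameter name):

# Sketch only: the system_prompt kwarg on OpenAIInferenceEngine is an
# assumption for illustration; the diff does not show its signature.
from vectorq.config import VectorQConfig
from vectorq.inference_engine.strategies.openai import OpenAIInferenceEngine

config = VectorQConfig(
    # The system prompt is now owned by the inference engine ...
    inference_engine=OpenAIInferenceEngine(
        system_prompt="You are a helpful assistant.",  # assumed kwarg
    ),
    # ... so VectorQConfig itself no longer takes system_prompt.
    # vector_db, eviction_policy, etc. keep the defaults shown above.
)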

vectorq/main.py (2 additions, 5 deletions)

@@ -43,14 +43,11 @@ def infer_with_cache_info(
         Infer a response from the cache and return the cache hit status, the response, and the nearest neighbor response.
         Args
             prompt: str - The prompt to create a response for.
-            system_prompt: Optional[str] - The optional system prompt to use for the response. It will override the system prompt in the VectorQConfig if provided.
+            system_prompt: Optional[str] - The optional system prompt to use for the response. It will override the system prompt set in the InferenceEngine if provided.
             inference_engine_kwargs: Any - Additional arguments to pass to the underlying inference engine (e.g., max_tokens, temperature, etc).
         Returns
             Tuple[bool, str, str] - [is_cache_hit, response, nn_response] (the response is the one supposed to be used by the user, the nn_response is for benchmarking purposes)
         """
-        if system_prompt is None:
-            system_prompt = self.vectorq_config.system_prompt
-
         return self.vectorq_policy.process_request(
             prompt=prompt,
             system_prompt=system_prompt,
@@ -67,7 +64,7 @@ def infer(
         Infer a response from the cache and return the response.
         Args
             prompt: str - The prompt to create a response for.
-            system_prompt: Optional[str] - The optional system prompt to use for the response. It will override the system prompt in the VectorQConfig if provided.
+            system_prompt: Optional[str] - The optional system prompt to use for the response. It will override the system prompt in the InferenceEngine if provided.
             inference_engine_kwargs: Any - Additional arguments to pass to the underlying inference engine (e.g., max_tokens, temperature, etc).
         Returns
             str - The response to be used by the user
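Callers now rely on the per-call override or the engine's own prompt; the old fallback to VectorQConfig is gone. A hedged usage sketch, assuming vectorq/main.py exposes these methods on a VectorQ class (the class name and constructor are assumptions; the diff only shows the two method docstrings):

# Usage sketch; `VectorQ` and `VectorQ(config)` are assumed names.
from vectorq.main import VectorQ

vq = VectorQ(config)  # config built as in the sketch above

# A per-call system_prompt overrides whatever the InferenceEngine was
# configured with (per the updated docstring).
answer = vq.infer(
    prompt="What does VectorQ cache?",
    system_prompt="Answer in one sentence.",  # optional override
    max_tokens=64,  # forwarded via inference_engine_kwargs
)

# Leaving system_prompt unset falls through to the engine's prompt;
# the deleted lines above show the old VectorQConfig fallback is gone.
is_hit, response, nn_response = vq.infer_with_cache_info(
    prompt="What does VectorQ cache?",
)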
