Skip to content

Commit a9cfd66

Browse files
joshmarkovic authored and skrawcz committed
Enforce Ruff B905
1 parent fbb92f4 commit a9cfd66

File tree

18 files changed

+27
-19
lines changed

18 files changed

+27
-19
lines changed

contrib/docs/compile_docs.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -344,7 +344,7 @@ def _create_commit_file(df_path, single_df):
344344
os.makedirs(commit_path, exist_ok=True)
345345
with open(os.path.join(commit_path, "commit.txt"), "w") as f:
346346
for commit, ts in zip(
347-
single_df["__init__.py"]["commit"], single_df["__init__.py"]["timestamp"]
347+
single_df["__init__.py"]["commit"], single_df["__init__.py"]["timestamp"], strict=False
348348
):
349349
f.write(f"[commit::{commit}][ts::{ts}]\n")
350350

contrib/hamilton/contrib/user/skrawcz/customize_embeddings/__init__.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -409,7 +409,7 @@ def _accuracy_and_se(
409409
threshold = threshold_thousandths / 1000
410410
total = 0
411411
correct = 0
412-
for cs, ls in zip(cosine_similarity, labeled_similarity):
412+
for cs, ls in zip(cosine_similarity, labeled_similarity, strict=False):
413413
total += 1
414414
if cs > threshold:
415415
prediction = 1

contrib/hamilton/contrib/user/skrawcz/fine_tuning/__init__.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -657,7 +657,7 @@ def hold_out_set_predictions(
657657
)
658658
predictions.append(prediction)
659659
questions.append(tokenizer.decode(sample["input_ids"], skip_special_tokens=True))
660-
return list(zip(questions, predictions))
660+
return list(zip(questions, predictions, strict=False))
661661

662662

663663
if __name__ == "__main__":

examples/LLM_Workflows/modular_llm_stack/lancedb_module.py

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -68,7 +68,9 @@ def data_objects(
6868
assert len(ids) == len(titles) == len(text_contents) == len(embeddings)
6969
return [
7070
dict(squad_id=id_, title=title, context=context, vector=embedding, **metadata)
71-
for id_, title, context, embedding in zip(ids, titles, text_contents, embeddings)
71+
for id_, title, context, embedding in zip(
72+
ids, titles, text_contents, embeddings, strict=False
73+
)
7274
]
7375

7476

examples/LLM_Workflows/modular_llm_stack/marqo_module.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -58,7 +58,7 @@ def data_objects(
5858
assert len(ids) == len(titles) == len(text_contents)
5959
return [
6060
dict(_id=id, title=title, Description=text_content)
61-
for id, title, text_content in zip(ids, titles, text_contents)
61+
for id, title, text_content in zip(ids, titles, text_contents, strict=False)
6262
if id is not None and title is not None or text_content is not None
6363
]
6464

examples/LLM_Workflows/modular_llm_stack/pinecone_module.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -62,7 +62,7 @@ def data_objects(
6262
assert len(ids) == len(titles) == len(embeddings)
6363
properties = [dict(title=title, **metadata) for title in titles]
6464
embeddings = [x.tolist() for x in embeddings]
65-
return list(zip(ids, embeddings, properties))
65+
return list(zip(ids, embeddings, properties, strict=False))
6666

6767

6868
def push_to_vector_db(

examples/LLM_Workflows/modular_llm_stack/qdrant_module.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -59,7 +59,7 @@ def data_objects(
5959
ids = list(range(len(ids)))
6060
payloads = [
6161
dict(id=_id, text_content=text_content, title=title, **metadata)
62-
for _id, title, text_content in zip(ids, titles, text_contents)
62+
for _id, title, text_content in zip(ids, titles, text_contents, strict=False)
6363
]
6464
embeddings = [x.tolist() for x in embeddings]
6565
return dict(ids=ids, vectors=embeddings, payload=payloads)

examples/LLM_Workflows/modular_llm_stack/weaviate_module.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -86,7 +86,7 @@ def data_objects(
8686
assert len(ids) == len(titles) == len(text_contents)
8787
return [
8888
dict(squad_id=id_, title=title, context=context, **metadata)
89-
for id_, title, context in zip(ids, titles, text_contents)
89+
for id_, title, context in zip(ids, titles, text_contents, strict=False)
9090
]
9191

9292

examples/LLM_Workflows/retrieval_augmented_generation/backend/ingestion.py

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -212,7 +212,9 @@ def store_documents(
212212
uuid=document_uuid,
213213
)
214214

215-
chunk_iterator = zip(pdf_obj["chunked_text"], pdf_obj["chunked_embeddings"])
215+
chunk_iterator = zip(
216+
pdf_obj["chunked_text"], pdf_obj["chunked_embeddings"], strict=False
217+
)
216218
for chunk_idx, (chunk_text, chunk_embedding) in enumerate(chunk_iterator):
217219
chunk_object = dict(content=chunk_text, chunk_index=chunk_idx)
218220
chunk_uuid = generate_uuid5(chunk_object, "Chunk")

examples/due_date_probabilities/probability_estimation.py

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -133,7 +133,8 @@ def raw_probabilities(raw_data: str) -> pd.DataFrame:
133133
days = [int(item.split(", ")[1].split()[0]) for item in raw_data]
134134
probability = [float(item.split()[5].replace("%", "")) / 100 for item in raw_data]
135135
probabilities_data = [
136-
(week * 7 + day, probability) for week, day, probability in zip(weeks, days, probability)
136+
(week * 7 + day, probability)
137+
for week, day, probability in zip(weeks, days, probability, strict=False)
137138
]
138139
probabilities_df = pd.DataFrame(probabilities_data)
139140
probabilities_df.columns = ["days", "probability"]

0 commit comments

Comments (0)