
Commit 5726609
Author: Azure Pipelines
Merge remote-tracking branch 'origin/main' into publication
2 parents: 1e0e807 + 0518bee

18 files changed: 45 additions, 39 deletions

.github/workflows/ci_checks.yml

Lines changed: 1 addition & 1 deletion
@@ -14,4 +14,4 @@ concurrency:
 
 jobs:
   check-schema:
-    uses: Lightning-AI/utilities/.github/workflows/check-schema.yml@v0.11.9
+    uses: Lightning-AI/utilities/.github/workflows/check-schema.yml@v0.12.0

.github/workflows/docs-deploy.yml

Lines changed: 1 addition & 1 deletion
@@ -79,7 +79,7 @@ jobs:
 
       - name: Deploy 🚀
         if: ${{ github.event_name != 'pull_request' }}
-        uses: JamesIves/github-pages-deploy-action@v4.7.1
+        uses: JamesIves/github-pages-deploy-action@v4.7.3
         with:
           token: ${{ secrets.GITHUB_TOKEN }}
           branch: gh-pages # The branch the action should deploy to.

.pre-commit-config.yaml

Lines changed: 3 additions & 3 deletions
@@ -9,7 +9,7 @@ ci:
 
 repos:
   - repo: https://github.com/pre-commit/pre-commit-hooks
-    rev: v4.6.0
+    rev: v5.0.0
     hooks:
       - id: end-of-file-fixer
      - id: trailing-whitespace
@@ -45,7 +45,7 @@ repos:
         args: ["--print-width=120"]
 
   - repo: https://github.com/executablebooks/mdformat
-    rev: 0.7.17
+    rev: 0.7.21
     hooks:
       - id: mdformat
         additional_dependencies:
@@ -55,7 +55,7 @@ repos:
         args: ["--number"]
 
   - repo: https://github.com/astral-sh/ruff-pre-commit
-    rev: v0.5.0
+    rev: v0.8.6
     hooks:
       # try to fix what is possible
       - id: ruff

README.md

Lines changed: 2 additions & 2 deletions
@@ -44,13 +44,13 @@ The addition has to formed as new folder:
   accelerator:
     - CPU
 ```
-- _\[optional\]_ requirements listed in `requirements.txt` in the particular folder (in case you need some other packaged then listed the parent folder)
+- _[optional]_ requirements listed in `requirements.txt` in the particular folder (in case you need some other packaged then listed the parent folder)
 
 ## Using datasets
 
 It is quite common to use some public or competition's dataset for your example.
 We facilitate this via defining the data sources in the metafile.
-There are two basic options, download a file from web or pul Kaggle dataset _\[Experimental\]_:
+There are two basic options, download a file from web or pul Kaggle dataset _[Experimental]_:
 
 ```yaml
 datasets:

course_UvA-DL/03-initialization-and-optimization/notebook.py

Lines changed: 1 addition & 1 deletion
@@ -225,7 +225,7 @@ def plot_dists(val_dict, color="C0", xlabel=None, stat="count", use_kde=True):
             kde=use_kde and ((val_dict[key].max() - val_dict[key].min()) > 1e-8),
         )  # Only plot kde if there is variance
         hidden_dim_str = (
-            r"(%i $\to$ %i)" % (val_dict[key].shape[1], val_dict[key].shape[0]) if len(val_dict[key].shape) > 1 else ""
+            r"(%i $\to$ %i)" % (val_dict[key].shape[1], val_dict[key].shape[0]) if len(val_dict[key].shape) > 1 else ""  # noqa: UP031
         )
         key_ax.set_title(f"{key} {hidden_dim_str}")
         if xlabel is not None:
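The `# noqa: UP031` added here suppresses ruff's UP031 rule (printf-style string formatting) on this single line, presumably to keep the raw LaTeX string in %-form while the rest of the commit migrates to f-strings. A minimal sketch of the equivalence UP031 would otherwise enforce, using invented shape values rather than anything from the notebook:

    # Hypothetical values standing in for val_dict[key].shape[1] and .shape[0]
    in_dim, out_dim = 128, 256
    old_style = r"(%i $\to$ %i)" % (in_dim, out_dim)  # printf-style, flagged by UP031
    new_style = rf"({in_dim} $\to$ {out_dim})"        # the f-string rewrite ruff suggests
    assert old_style == new_style == r"(128 $\to$ 256)"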

course_UvA-DL/05-transformers-and-MH-attention/MHAttention.py

Lines changed: 5 additions & 5 deletions
@@ -118,7 +118,7 @@
 # * [Attention?
 # Attention!
 # (Lilian Weng, 2018)](https://lilianweng.github.io/lil-log/2018/06/24/attention-attention.html) - A nice blog post summarizing attention mechanisms in many domains including vision.
-# * [Illustrated: Self-Attention (Raimi Karim, 2019)](https://towardsdatascience.com/illustrated-self-attention-2d627e33b20a) - A nice visualization of the steps of self-attention.
+# * [Illustrated: Self-Attention (Raimi Karim, 2019)](https://medium.com/data-science/illustrated-self-attention-2d627e33b20a) - A nice visualization of the steps of self-attention.
 # Recommended going through if the explanation below is too abstract for you.
 # * [The Transformer family (Lilian Weng, 2020)](https://lilianweng.github.io/lil-log/2020/04/07/the-transformer-family.html) - A very detailed blog post reviewing more variants of Transformers besides the original one.
 
@@ -633,8 +633,8 @@ def forward(self, x):
 fig, ax = plt.subplots(2, 2, figsize=(12, 4))
 ax = [a for a_list in ax for a in a_list]
 for i in range(len(ax)):
-    ax[i].plot(np.arange(1, 17), pe[i, :16], color="C%i" % i, marker="o", markersize=6, markeredgecolor="black")
-    ax[i].set_title("Encoding in hidden dimension %i" % (i + 1))
+    ax[i].plot(np.arange(1, 17), pe[i, :16], color=f"C{i}", marker="o", markersize=6, markeredgecolor="black")
+    ax[i].set_title(f"Encoding in hidden dimension {i + 1}")
     ax[i].set_xlabel("Position in sequence", fontsize=10)
     ax[i].set_ylabel("Positional encoding", fontsize=10)
     ax[i].set_xticks(np.arange(1, 17))
@@ -1088,7 +1088,7 @@ def plot_attention_maps(input_data, attn_maps, idx=0):
             ax[row][column].set_xticklabels(input_data.tolist())
             ax[row][column].set_yticks(list(range(seq_len)))
             ax[row][column].set_yticklabels(input_data.tolist())
-            ax[row][column].set_title("Layer %i, Head %i" % (row + 1, column + 1))
+            ax[row][column].set_title(f"Layer {row + 1}, Head {column + 1}")
     fig.subplots_adjust(hspace=0.5)
     plt.show()
 
@@ -1590,7 +1590,7 @@ def visualize_prediction(idx):
 visualize_prediction(mistakes[-1])
 print("Probabilities:")
 for i, p in enumerate(preds[mistakes[-1]].cpu().numpy()):
-    print("Image %i: %4.2f%%" % (i, 100.0 * p))
+    print(f"Image {i}: {100.0 * p:4.2f}%")
 
 # %% [markdown]
 # In this example, the model confuses a palm tree with a building, giving a probability of ~90% to image 2, and 8% to the actual anomaly.
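The hunks in this file are the same %-to-f-string modernization; the only subtle case is the last one, where the %4.2f width/precision has to be carried over into the f-string format spec. A quick illustrative check with made-up values, not taken from the notebook:

    i, p = 2, 0.897
    old = "Image %i: %4.2f%%" % (i, 100.0 * p)  # printf-style, note the escaped %%
    new = f"Image {i}: {100.0 * p:4.2f}%"       # same width/precision as a format spec
    assert old == new == "Image 2: 89.70%"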

course_UvA-DL/06-graph-neural-networks/requirements.txt

Lines changed: 3 additions & 0 deletions
@@ -5,3 +5,6 @@ torch-sparse ==0.6.*
 torch-cluster ==1.6.*
 torch-spline-conv ==1.2.*
 torch-geometric ==2.1.*
+
+# todo: some compatibility issues
+numpy <2.0
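The new `numpy <2.0` pin presumably works around the NumPy 2.0 API/ABI break, which the older compiled extensions in this torch-geometric stack may not handle. A minimal, hypothetical runtime guard expressing the same constraint (not part of the repository):

    import numpy as np

    if int(np.__version__.split(".")[0]) >= 2:
        raise RuntimeError(
            f"NumPy {np.__version__} found; the pinned torch-geometric "
            "extensions in this tutorial expect numpy <2.0"
        )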

course_UvA-DL/07-deep-energy-based-generative-models/notebook.py

Lines changed: 1 addition & 1 deletion
@@ -570,7 +570,7 @@ def on_epoch_end(self, trainer, pl_module):
             grid = torchvision.utils.make_grid(
                 imgs_to_plot, nrow=imgs_to_plot.shape[0], normalize=True, value_range=(-1, 1)
             )
-            trainer.logger.experiment.add_image("generation_%i" % i, grid, global_step=trainer.current_epoch)
+            trainer.logger.experiment.add_image(f"generation_{i}", grid, global_step=trainer.current_epoch)
 
     def generate_imgs(self, pl_module):
         pl_module.eval()

course_UvA-DL/08-deep-autoencoders/notebook.py

Lines changed: 3 additions & 3 deletions
@@ -388,7 +388,7 @@ def on_train_epoch_end(self, trainer, pl_module):
 def train_cifar(latent_dim):
     # Create a PyTorch Lightning trainer with the generation callback
     trainer = pl.Trainer(
-        default_root_dir=os.path.join(CHECKPOINT_PATH, "cifar10_%i" % latent_dim),
+        default_root_dir=os.path.join(CHECKPOINT_PATH, f"cifar10_{latent_dim}"),
         accelerator="auto",
         devices=1,
         max_epochs=500,
@@ -402,7 +402,7 @@ def train_cifar(latent_dim):
     trainer.logger._default_hp_metric = None  # Optional logging argument that we don't need
 
     # Check whether pretrained model exists. If yes, load it and skip training
-    pretrained_filename = os.path.join(CHECKPOINT_PATH, "cifar10_%i.ckpt" % latent_dim)
+    pretrained_filename = os.path.join(CHECKPOINT_PATH, f"cifar10_{latent_dim}.ckpt")
     if os.path.isfile(pretrained_filename):
         print("Found pretrained model, loading...")
         model = Autoencoder.load_from_checkpoint(pretrained_filename)
@@ -475,7 +475,7 @@ def visualize_reconstructions(model, input_imgs):
     grid = torchvision.utils.make_grid(imgs, nrow=4, normalize=True, value_range=(-1, 1))
     grid = grid.permute(1, 2, 0)
     plt.figure(figsize=(7, 4.5))
-    plt.title("Reconstructed from %i latents" % (model.hparams.latent_dim))
+    plt.title(f"Reconstructed from {model.hparams.latent_dim} latents")
     plt.imshow(grid)
     plt.axis("off")
     plt.show()

course_UvA-DL/09-normalizing-flows/NF.py

Lines changed: 2 additions & 2 deletions
@@ -512,7 +512,7 @@ def visualize_dequantization(quants, prior=None):
     x_ticks = []
     for v in np.unique(out):
         indices = np.where(out == v)
-        color = to_rgb("C%i" % v)
+        color = to_rgb(f"C{v}")
         plt.fill_between(inp[indices], prob[indices], np.zeros(indices[0].shape[0]), color=color + (0.5,), label=str(v))
         plt.plot([inp[indices[0][0]]] * 2, [0, prob[indices[0][0]]], color=color)
         plt.plot([inp[indices[0][-1]]] * 2, [0, prob[indices[0][-1]]], color=color)
@@ -525,7 +525,7 @@ def visualize_dequantization(quants, prior=None):
     plt.xlim(inp.min(), inp.max())
     plt.xlabel("z")
     plt.ylabel("Probability")
-    plt.title("Dequantization distribution for %i discrete values" % quants)
+    plt.title(f"Dequantization distribution for {quants} discrete values")
     plt.legend()
     plt.show()
     plt.close()
