diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 11c89b9..bdfa46b 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -1,6 +1,6 @@
 repos:
   - repo: https://github.com/pre-commit/pre-commit-hooks
-    rev: v5.0.0  # Use the ref you want to point at
+    rev: v6.0.0  # Use the ref you want to point at
     hooks:
       - id: trailing-whitespace
       - id: check-ast
@@ -20,12 +20,12 @@ repos:
       - id: check-toml

   - repo: https://github.com/astral-sh/uv-pre-commit
-    rev: 0.6.14
+    rev: 0.10.3
     hooks:
       - id: uv-lock

   - repo: https://github.com/astral-sh/ruff-pre-commit
-    rev: v0.11.5
+    rev: v0.15.1
     hooks:
       - id: ruff
         args: [--fix, --exit-non-zero-on-fix]
@@ -40,7 +40,7 @@ repos:
         args: []

   - repo: https://github.com/pre-commit/mirrors-mypy
-    rev: v1.15.0
+    rev: v1.19.1
     hooks:
       - id: mypy
         entry: mypy
diff --git a/mmlearn/modules/encoders/clip.py b/mmlearn/modules/encoders/clip.py
index ebdaf53..706c490 100644
--- a/mmlearn/modules/encoders/clip.py
+++ b/mmlearn/modules/encoders/clip.py
@@ -327,7 +327,7 @@ def forward(self, inputs: dict[str, Any]) -> tuple[torch.Tensor]:
         """
         input_ids = inputs[Modalities.TEXT.name]
         attention_mask: Optional[torch.Tensor] = inputs.get(
-            "attention_mask", inputs.get(Modalities.TEXT.attention_mask, None)
+            "attention_mask", inputs.get(Modalities.TEXT.attention_mask)
         )
         position_ids = inputs.get("position_ids")

diff --git a/mmlearn/modules/encoders/text.py b/mmlearn/modules/encoders/text.py
index e608683..08d3dff 100644
--- a/mmlearn/modules/encoders/text.py
+++ b/mmlearn/modules/encoders/text.py
@@ -161,7 +161,7 @@ def forward(self, inputs: dict[str, Any]) -> BaseModelOutput:
         outputs = self.model(
             input_ids=inputs[Modalities.TEXT.name],
             attention_mask=inputs.get(
-                "attention_mask", inputs.get(Modalities.TEXT.attention_mask, None)
+                "attention_mask", inputs.get(Modalities.TEXT.attention_mask)
             ),
             position_ids=inputs.get("position_ids"),
             output_attentions=inputs.get("output_attentions"),
diff --git a/projects/bioscan_clip/encoders.py b/projects/bioscan_clip/encoders.py
index 8652972..80553ad 100644
--- a/projects/bioscan_clip/encoders.py
+++ b/projects/bioscan_clip/encoders.py
@@ -81,7 +81,7 @@ def forward(self, inputs: dict[str, Any]) -> BaseModelOutput:
         outputs = self.model(
             input_ids=inputs[Modalities.DNA.name],
             attention_mask=inputs.get(
-                "attention_mask", inputs.get(Modalities.DNA.attention_mask, None)
+                "attention_mask", inputs.get(Modalities.DNA.attention_mask)
             ),
             position_ids=inputs.get("position_ids"),
             output_attentions=inputs.get("output_attentions"),
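
Aside from the hook version bumps, the encoder changes in `clip.py`, `text.py`, and `encoders.py` are behavior-preserving cleanups: Python's `dict.get(key)` already returns `None` when the key is absent, so the explicit `None` default in the inner `inputs.get(...)` calls was redundant. A minimal sketch of the equivalence, using the string `"text_attention_mask"` as a hypothetical stand-in for `Modalities.TEXT.attention_mask`:

```python
# Sketch: dict.get(key) defaults to None, so passing None explicitly is a no-op.
inputs = {"input_ids": [101, 2023, 102]}  # neither mask key is present

# "text_attention_mask" is a hypothetical stand-in for Modalities.TEXT.attention_mask
before = inputs.get("attention_mask", inputs.get("text_attention_mask", None))
after = inputs.get("attention_mask", inputs.get("text_attention_mask"))

assert before is None and after is None  # identical result, less noise
```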