Skip to content

Commit cca6486

Browse files
author
root
committed
[FIX] CI
1 parent 8626a8d commit cca6486

File tree

5 files changed

+11
-10
lines changed

5 files changed

+11
-10
lines changed

.github/workflows/ci-auto-format-and-commit.yml

Lines changed: 4 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -29,10 +29,11 @@ jobs:
2929
- name: Install formatter
3030
shell: bash
3131
run: |
32-
wget https://apt.llvm.org/llvm.sh && chmod +x ./llvm.sh && ./llvm.sh 20
32+
wget https://apt.llvm.org/llvm.sh
33+
sudo chmod +x ./llvm.sh && sudo ./llvm.sh 19
3334
sudo apt-get update
34-
sudo apt-get install clang-format-20
35-
sudo ln -sf $(which clang-format-20) /usr/bin/clang-format
35+
sudo apt-get install clang-format-19
36+
sudo ln -sf $(which clang-format-19) /usr/bin/clang-format
3637
python -m pip install black
3738
3839
- name: Run format script

csrc/lib/ops/conv2d/op.cpp

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -4,7 +4,7 @@ namespace pmpp::ops::cpu
44
{
55

66
template <>
7-
void launchConv2D<fp32_t>(const fp32_t* input, const fp32_t* kernel,
7+
void launchConv2d<fp32_t>(const fp32_t* input, const fp32_t* kernel,
88
fp32_t* output, int32_t inputHeight,
99
int32_t inputWidth, int32_t kernelSize)
1010
{

csrc/lib/ops/conv2d/op.cu

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -54,7 +54,7 @@ __global__ void conv2DKernel(const ScalarT* input, const ScalarT* kernel,
5454
}
5555

5656
template <>
57-
void launchConv2D<fp32_t>(const fp32_t* d_input, const fp32_t* d_kernel,
57+
void launchConv2d<fp32_t>(const fp32_t* d_input, const fp32_t* d_kernel,
5858
fp32_t* d_output, int32_t inputHeight,
5959
int32_t inputWidth, int32_t kernelSize)
6060
{

csrc/lib/ops/conv2d/torch_impl.cpp

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -22,7 +22,7 @@ auto conv2D(const torch::Tensor& input, const torch::Tensor& kernel)
2222

2323
switch (input.scalar_type()) {
2424
case torch::kFloat32: {
25-
pmpp::ops::cpu::launchConv2D(input.const_data_ptr<fp32_t>(),
25+
pmpp::ops::cpu::launchConv2d(input.const_data_ptr<fp32_t>(),
2626
kernel.const_data_ptr<fp32_t>(),
2727
output.mutable_data_ptr<fp32_t>(),
2828
input_height, input_width, kernel_size);
@@ -56,7 +56,7 @@ auto conv2D(const torch::Tensor& input, const torch::Tensor& kernel)
5656

5757
switch (input.scalar_type()) {
5858
case torch::kFloat32: {
59-
pmpp::ops::cuda::launchConv2D(
59+
pmpp::ops::cuda::launchConv2d(
6060
input.data_ptr<fp32_t>(), kernel.data_ptr<fp32_t>(),
6161
output.data_ptr<fp32_t>(), input_height, input_width, kernel_size);
6262
break;

csrc/lib/ops/ops.hpp

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -13,8 +13,8 @@ void launchCvtRGBtoGray(uint8_t* picOut, const uint8_t* picIn, uint32_t nRows,
1313
void launchMatmul(const fp32_t* A, const fp32_t* B, fp32_t* C, size_t width);
1414

1515
template <typename ScalarT>
16-
void launchConv2D(const ScalarT* input, const ScalarT* kernel, ScalarT* output,
17-
int32_t inputHeight, int32_t inputWidth, int32_t kernelSize);
16+
void launchConv2d(const ScalarT* input, const ScalarT* kernel, ScalarT* output,
17+
int32_t inHeight, int32_t inWidth, int32_t kernelSize);
1818

1919
} // namespace pmpp::ops::cpu
2020

@@ -30,7 +30,7 @@ void launchMatmul(const fp32_t* dA, const fp32_t* dB, fp32_t* dC,
3030
size_t width);
3131

3232
template <typename ScalarT>
33-
void launchConv2D(const ScalarT* d_input, const ScalarT* d_kernel,
33+
void launchConv2d(const ScalarT* d_input, const ScalarT* d_kernel,
3434
ScalarT* d_output, int32_t inputHeight, int32_t inputWidth,
3535
int32_t kernelSize);
3636

0 commit comments

Comments (0)