Skip to content

Commit 7a99eea

Browse files
github-actions[bot] and jamesnulliu
authored and committed
[skip ci] Auto-format code with clang-format and black
1 parent 1531d8c commit 7a99eea

File tree

9 files changed

+24
-18
lines changed

9 files changed

+24
-18
lines changed

csrc/include/pmpp/pch.hpp

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,10 +1,10 @@
11
#pragma once
22

3+
#include <algorithm>
34
#include <cuda_runtime.h>
4-
#include <torch/torch.h>
55
#include <torch/python.h>
6+
#include <torch/torch.h>
67
#include <type_traits>
7-
#include <algorithm>
88

99
#include "pmpp/system.hpp"
1010
#include "pmpp/types/cu_types.cuh"

csrc/lib/ops/alphabetHistogram/op.hpp

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,6 @@
11
#pragma once
22
#include "pmpp/pch.hpp"
33

4-
54
namespace pmpp::ops::cpu
65
{
76
template <typename ScalarT>

csrc/lib/ops/conv2d/op.cuh

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -9,7 +9,7 @@ namespace pmpp::ops::cuda
99
{
1010
static constexpr int32_t MAX_CONV2D_KERNEL_SIZE = 9;
1111
static __constant__ fp32_t
12-
KERNEL[MAX_CONV2D_KERNEL_SIZE * MAX_CONV2D_KERNEL_SIZE];
12+
KERNEL[MAX_CONV2D_KERNEL_SIZE * MAX_CONV2D_KERNEL_SIZE];
1313

1414
template <typename ScalarT, uint32_t TILE_SIZE = 32>
1515
__global__ void conv2DKernel(const ScalarT* input, const ScalarT* kernel,

csrc/lib/ops/matmul/op.cuh

Lines changed: 5 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -75,8 +75,10 @@ void launchMatmul(const fp32_t* dA, const fp32_t* dB, fp32_t* dC, size_t width)
7575
PMPP_DEBUG_CUDA_ERR_CHECK(cudaGetLastError());
7676
}
7777

78-
namespace torch_impl{
79-
inline auto matmul(const torch::Tensor& A, const torch::Tensor& B) -> torch::Tensor
78+
namespace torch_impl
79+
{
80+
inline auto matmul(const torch::Tensor& A, const torch::Tensor& B)
81+
-> torch::Tensor
8082
{
8183
torch::Tensor C = torch::empty({A.size(0), B.size(1)}, A.options());
8284

@@ -93,5 +95,5 @@ inline auto matmul(const torch::Tensor& A, const torch::Tensor& B) -> torch::Ten
9395

9496
return C;
9597
}
96-
}
98+
} // namespace torch_impl
9799
} // namespace pmpp::ops::cuda

csrc/lib/ops/matmul/op.hpp

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -18,7 +18,8 @@ void launchMatmul(const fp32_t* A, const fp32_t* B, fp32_t* C, size_t m)
1818

1919
namespace torch_impl
2020
{
21-
inline auto matmul(const torch::Tensor& A, const torch::Tensor& B) -> torch::Tensor
21+
inline auto matmul(const torch::Tensor& A, const torch::Tensor& B)
22+
-> torch::Tensor
2223
{
2324
torch::Tensor C = torch::empty({A.size(0), B.size(1)}, A.options());
2425

@@ -35,5 +36,5 @@ inline auto matmul(const torch::Tensor& A, const torch::Tensor& B) -> torch::Ten
3536

3637
return C;
3738
}
38-
}
39+
} // namespace torch_impl
3940
} // namespace pmpp::ops::cpu

csrc/lib/ops/reduction/op.hpp

Lines changed: 4 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -24,8 +24,10 @@ inline auto mulReduction(const torch::Tensor& in) -> torch::Tensor
2424

2525
switch (in.scalar_type()) {
2626
case torch::kFloat32: {
27-
result = torch::tensor(launchReduction(mutableIn.mutable_data_ptr<fp32_t>(), in.numel(),
28-
std::multiplies<>()), in.options());
27+
result =
28+
torch::tensor(launchReduction(mutableIn.mutable_data_ptr<fp32_t>(),
29+
in.numel(), std::multiplies<>()),
30+
in.options());
2931
break;
3032
}
3133
default: {

csrc/lib/ops/torch_impl.hpp

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -8,7 +8,7 @@
88
#include "./cvtRGBtoGray/op.hpp"
99
#include "./matmul/op.cuh"
1010
#include "./matmul/op.hpp"
11-
#include "./vecAdd/op.cuh"
12-
#include "./vecAdd/op.hpp"
11+
#include "./reduction/op.cuh"
1312
#include "./reduction/op.hpp"
14-
#include "./reduction/op.cuh"
13+
#include "./vecAdd/op.cuh"
14+
#include "./vecAdd/op.hpp"

csrc/lib/ops/vecAdd/op.cuh

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -28,7 +28,8 @@ void launchVecAdd(const fp32_t* d_A, const fp32_t* d_B, fp32_t* d_C, size_t n)
2828

2929
namespace torch_impl
3030
{
31-
inline auto vectorAdd(const torch::Tensor& A, const torch::Tensor& B) -> torch::Tensor
31+
inline auto vectorAdd(const torch::Tensor& A, const torch::Tensor& B)
32+
-> torch::Tensor
3233
{
3334
torch::Tensor C = torch::empty_like(A);
3435

@@ -45,5 +46,5 @@ inline auto vectorAdd(const torch::Tensor& A, const torch::Tensor& B) -> torch::
4546

4647
return C;
4748
}
48-
}
49+
} // namespace torch_impl
4950
} // namespace pmpp::ops::cuda

csrc/lib/ops/vecAdd/op.hpp

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -14,7 +14,8 @@ void launchVecAdd(const fp32_t* a, const fp32_t* b, fp32_t* c, size_t n)
1414

1515
namespace torch_impl
1616
{
17-
inline auto vectorAdd(const torch::Tensor& A, const torch::Tensor& B) -> torch::Tensor
17+
inline auto vectorAdd(const torch::Tensor& A, const torch::Tensor& B)
18+
-> torch::Tensor
1819
{
1920
torch::Tensor C = torch::zeros_like(A);
2021

@@ -31,5 +32,5 @@ inline auto vectorAdd(const torch::Tensor& A, const torch::Tensor& B) -> torch::
3132

3233
return C;
3334
}
34-
}
35+
} // namespace torch_impl
3536
} // namespace pmpp::ops::cpu

0 commit comments

Comments (0)