From 380943208c35054e0fd30e34d361ebd93e5dd069 Mon Sep 17 00:00:00 2001 From: Rustem Date: Sat, 4 May 2019 05:03:17 +0300 Subject: [PATCH] added Pytorch 1.0 compatibility and also removed deprecations --- models/py_utils/_cpools/src/bottom_pool.cpp | 10 +++++----- models/py_utils/_cpools/src/left_pool.cpp | 10 +++++----- models/py_utils/_cpools/src/right_pool.cpp | 10 +++++----- models/py_utils/_cpools/src/top_pool.cpp | 10 +++++----- 4 files changed, 20 insertions(+), 20 deletions(-) diff --git a/models/py_utils/_cpools/src/bottom_pool.cpp b/models/py_utils/_cpools/src/bottom_pool.cpp index bd6c65a..9f4b709 100644 --- a/models/py_utils/_cpools/src/bottom_pool.cpp +++ b/models/py_utils/_cpools/src/bottom_pool.cpp @@ -1,4 +1,4 @@ -#include <torch/torch.h> +#include <torch/extension.h> #include <vector> @@ -41,8 +41,8 @@ std::vector<at::Tensor> pool_backward( int32_t height = input.size(2); int32_t width = input.size(3); - auto max_val = at::zeros(torch::CUDA(at::kFloat), {batch, channel, width}); - auto max_ind = at::zeros(torch::CUDA(at::kLong), {batch, channel, width}); + auto max_val = at::zeros({batch, channel, width}, torch::TensorOptions().dtype(torch::kFloat).device(torch::kCUDA)); + auto max_ind = at::zeros({batch, channel, width}, torch::TensorOptions().dtype(torch::kLong).device(torch::kCUDA)); auto input_temp = input.select(2, 0); max_val.copy_(input_temp); @@ -54,8 +54,8 @@ std::vector<at::Tensor> pool_backward( output_temp.copy_(grad_output_temp); auto un_max_ind = max_ind.unsqueeze(2); - auto gt_mask = at::zeros(torch::CUDA(at::kByte), {batch, channel, width}); - auto max_temp = at::zeros(torch::CUDA(at::kFloat), {batch, channel, width}); + auto gt_mask = at::zeros({batch, channel, width}, torch::TensorOptions().dtype(torch::kByte).device(torch::kCUDA)); + auto max_temp = at::zeros({batch, channel, width}, torch::TensorOptions().dtype(torch::kFloat).device(torch::kCUDA)); for (int32_t ind = 0; ind < height - 1; ++ind) { input_temp = input.select(2, ind + 1); at::gt_out(gt_mask, input_temp, max_val); diff --git 
a/models/py_utils/_cpools/src/left_pool.cpp b/models/py_utils/_cpools/src/left_pool.cpp index fbc5d98..c3dae9f 100644 --- a/models/py_utils/_cpools/src/left_pool.cpp +++ b/models/py_utils/_cpools/src/left_pool.cpp @@ -1,4 +1,4 @@ -#include <torch/torch.h> +#include <torch/extension.h> #include <vector> @@ -41,8 +41,8 @@ std::vector<at::Tensor> pool_backward( int32_t height = input.size(2); int32_t width = input.size(3); - auto max_val = at::zeros(torch::CUDA(at::kFloat), {batch, channel, height}); - auto max_ind = at::zeros(torch::CUDA(at::kLong), {batch, channel, height}); + auto max_val = at::zeros({batch, channel, height}, torch::TensorOptions().dtype(torch::kFloat).device(torch::kCUDA)); + auto max_ind = at::zeros({batch, channel, height}, torch::TensorOptions().dtype(torch::kLong).device(torch::kCUDA)); auto input_temp = input.select(3, width - 1); max_val.copy_(input_temp); @@ -54,8 +54,8 @@ std::vector<at::Tensor> pool_backward( output_temp.copy_(grad_output_temp); auto un_max_ind = max_ind.unsqueeze(3); - auto gt_mask = at::zeros(torch::CUDA(at::kByte), {batch, channel, height}); - auto max_temp = at::zeros(torch::CUDA(at::kFloat), {batch, channel, height}); + auto gt_mask = at::zeros({batch, channel, height}, torch::TensorOptions().dtype(torch::kByte).device(torch::kCUDA)); + auto max_temp = at::zeros({batch, channel, height}, torch::TensorOptions().dtype(torch::kFloat).device(torch::kCUDA)); for (int32_t ind = 1; ind < width; ++ind) { input_temp = input.select(3, width - ind - 1); at::gt_out(gt_mask, input_temp, max_val); diff --git a/models/py_utils/_cpools/src/right_pool.cpp b/models/py_utils/_cpools/src/right_pool.cpp index 36c5c85..f2acd82 100644 --- a/models/py_utils/_cpools/src/right_pool.cpp +++ b/models/py_utils/_cpools/src/right_pool.cpp @@ -1,4 +1,4 @@ -#include <torch/torch.h> +#include <torch/extension.h> #include <vector> @@ -41,8 +41,8 @@ std::vector<at::Tensor> pool_backward( int32_t height = input.size(2); int32_t width = input.size(3); - auto max_val = at::zeros(torch::CUDA(at::kFloat), {batch, channel, height}); - auto max_ind = 
at::zeros(torch::CUDA(at::kLong), {batch, channel, height}); + auto max_val = at::zeros({batch, channel, height}, torch::TensorOptions().dtype(torch::kFloat).device(torch::kCUDA)); + auto max_ind = at::zeros({batch, channel, height}, torch::TensorOptions().dtype(torch::kLong).device(torch::kCUDA)); auto input_temp = input.select(3, 0); max_val.copy_(input_temp); @@ -54,8 +54,8 @@ std::vector<at::Tensor> pool_backward( output_temp.copy_(grad_output_temp); auto un_max_ind = max_ind.unsqueeze(3); - auto gt_mask = at::zeros(torch::CUDA(at::kByte), {batch, channel, height}); - auto max_temp = at::zeros(torch::CUDA(at::kFloat), {batch, channel, height}); + auto gt_mask = at::zeros({batch, channel, height}, torch::TensorOptions().dtype(torch::kByte).device(torch::kCUDA)); + auto max_temp = at::zeros({batch, channel, height}, torch::TensorOptions().dtype(torch::kFloat).device(torch::kCUDA)); for (int32_t ind = 0; ind < width - 1; ++ind) { input_temp = input.select(3, ind + 1); at::gt_out(gt_mask, input_temp, max_val); diff --git a/models/py_utils/_cpools/src/top_pool.cpp b/models/py_utils/_cpools/src/top_pool.cpp index 4ac287f..03d5e1c 100644 --- a/models/py_utils/_cpools/src/top_pool.cpp +++ b/models/py_utils/_cpools/src/top_pool.cpp @@ -1,4 +1,4 @@ -#include <torch/torch.h> +#include <torch/extension.h> #include <vector> @@ -41,8 +41,8 @@ std::vector<at::Tensor> top_pool_backward( int32_t height = input.size(2); int32_t width = input.size(3); - auto max_val = at::zeros(torch::CUDA(at::kFloat), {batch, channel, width}); - auto max_ind = at::zeros(torch::CUDA(at::kLong), {batch, channel, width}); + auto max_val = at::zeros({batch, channel, width}, torch::TensorOptions().dtype(torch::kFloat).device(torch::kCUDA)); + auto max_ind = at::zeros({batch, channel, width}, torch::TensorOptions().dtype(torch::kLong).device(torch::kCUDA)); auto input_temp = input.select(2, height - 1); max_val.copy_(input_temp); @@ -54,8 +54,8 @@ std::vector<at::Tensor> top_pool_backward( output_temp.copy_(grad_output_temp); auto un_max_ind = max_ind.unsqueeze(2); - auto gt_mask 
= at::zeros(torch::CUDA(at::kByte), {batch, channel, width}); - auto max_temp = at::zeros(torch::CUDA(at::kFloat), {batch, channel, width}); + auto gt_mask = at::zeros({batch, channel, width}, torch::TensorOptions().dtype(torch::kByte).device(torch::kCUDA)); + auto max_temp = at::zeros({batch, channel, width}, torch::TensorOptions().dtype(torch::kFloat).device(torch::kCUDA)); for (int32_t ind = 1; ind < height; ++ind) { input_temp = input.select(2, height - ind - 1); at::gt_out(gt_mask, input_temp, max_val);