Jan 12, 2020
2 mins read
First of all, include libtorch by adding the following header:
#include <torch/script.h>

// Pick the GPU when one is available, otherwise fall back to CPU.
torch::Device device(torch::cuda::is_available() ? torch::kCUDA : torch::kCPU);
// To target a specific GPU instead, pass the device index explicitly, e.g. device 0:
// torch::Device device(torch::kCUDA, 0);
int width = 512;
int height = 512;
// Raw pixel buffer; from_blob does NOT copy or take ownership, so this
// memory must stay alive (and unfreed) for as long as the tensor is used.
int* pixelData = (int *) malloc(width * height * sizeof(int));
some_initialization(pixelData); // do something to load the data.

// The dtype in TensorOptions must match the buffer's element type --
// the buffer holds int, so use kInt (kShort would misread the memory).
auto options = c10::TensorOptions().dtype(torch::kInt);
// Shape {1, width, height}: channel-first single-channel image.
// NOTE(review): assumes the buffer layout matches this ordering -- confirm
// whether your data is stored row-major as height x width.
at::Tensor tensor = torch::from_blob(pixelData, { 1, width, height }, options);
// Convert to float32 for use with a model.
tensor = tensor.toType(torch::kFloat32);
// get the value of the tensor at index (0, 0, 0) as a float
// (the tensor is rank 3, so three indices are needed before .item()).
float val = tensor[0][0][0].item<float>();
If CUDA is installed, CUDA_TOOLKIT_ROOT_DIR
will be set automatically. Set the cuDNN library path CUDNN_LIBRARY_PATH
and the cuDNN include path CUDNN_INCLUDE_PATH
in CMakeLists.txt
to where you store the cuDNN package.
Dataset
// A map-style dataset: holds a list of file paths and parses one
// (data, label) example per index via parse_data (defined elsewhere).
class CustomDataset : public torch::data::Dataset<CustomDataset> {
  // use Batch as an alias of torch::data::Example<>
  using Batch = torch::data::Example<>;

 private:
  std::vector<std::string> file_list;

 public:
  // Constructor name must match the class name (was mistyped as ICHDataset).
  // Take the list by value and move it to avoid an extra copy.
  explicit CustomDataset(std::vector<std::string> file_list)
      : file_list(std::move(file_list)) {}

  // Load and return the example at `index`.
  Batch get(size_t index) override {
    // parse_data is expected to yield a (data, label) pair.
    auto [data, label] = parse_data(index);
    return { data, label };
  }

  // Number of examples; the Dataset interface allows "unknown" via optional.
  torch::optional<size_t> size() const override {
    return file_list.size();
  }
};
DataLoader
auto dataset = ICHDataset(file_list).map(torch::data::transforms::Stack<>());;;
auto dataloader = torch::data::make_data_loader(
std::move(dataset),
torch::data::DataLoaderOptions()
.batch_size(batch_size)
.workers(2)
.enforce_ordering(true)
);
If you see the error: ‘is_available’ is not a member of ‘at::cuda’
on if (torch::cuda::is_available())
include <torch/torch.h>
instead; though <torch/script.h>
might work in the main.cpp file, it does not work in other files.
https://discuss.pytorch.org/t/torch-is-available-is-throwing-compilation-error/41158/3
Compile CUDA version without CUDA-capable device https://stackoverflow.com/questions/20186848/can-i-compile-a-cuda-program-without-having-a-cuda-device/20196425
Sharing is caring!