Commit 01332ae5 authored by Gustavo Valiente

Clang-Tidy warnings

parent 85957905
@@ -79,9 +79,9 @@ namespace
 }
 };
-std::array<Task, PT_MAX_CPU_THREADS> tasks;
+std::array<Task, PT_MAX_CPU_THREADS> tasks{};
 Dispatcher& dispatcher = layerData.dispatcher;
-int threads = int(dispatcher.threads());
+auto threads = int(dispatcher.threads());
 for(int taskId = 0; taskId != threads; ++taskId)
 {
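Note on the tasks change above (it repeats in every layer file below): `std::array<Task, PT_MAX_CPU_THREADS> tasks;` default-initializes the array, so any built-in members of the aggregate Task are left with indeterminate values, which is the kind of thing Clang-Tidy warns about; `tasks{}` value-initializes them instead. A minimal sketch of the difference, with a made-up Task that only stands in for the library's real struct:

    #include <array>

    struct Task
    {
        int taskId;   // hypothetical members; the real Task holds tensors and thread info
        int threads;
    };

    int main()
    {
        std::array<Task, 4> defaulted;  // members hold indeterminate values; reading them is UB
        std::array<Task, 4> valued{};   // value-initialization zeroes every member
        (void) defaulted;
        return valued[0].taskId;        // well-defined: 0
    }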
@@ -150,8 +150,8 @@ bool Conv1DLayer::apply(LayerData& layerData) const
 Tensor& out = layerData.out;
 out.resize(iw[0] - offset, ww[0]);
-int threads = int(layerData.dispatcher.threads());
-int threadSize = int(ww[2] * ww[1]) / threads;
+auto threads = int(layerData.dispatcher.threads());
+auto threadSize = int(ww[2] * ww[1]) / threads;
 if(PT_LOOP_UNROLLING_ENABLE && threadSize && threadSize % (Tensor::VectorSize * 2) == 0)
 {
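The `int threads = int(...)` to `auto threads = int(...)` rewrites follow the pattern Clang-Tidy's modernize-use-auto check suggests: when the initializer is already an explicit cast, spelling the type again on the left adds nothing and can drift out of sync. A small illustration, with a stand-in Dispatcher rather than the library's real class:

    #include <cstddef>

    struct Dispatcher                       // hypothetical stand-in with a threads() accessor
    {
        std::size_t threads() const { return 4; }
    };

    int main()
    {
        Dispatcher dispatcher;
        int before = int(dispatcher.threads());   // type written twice
        auto after = int(dispatcher.threads());   // auto deduces int from the cast
        return before - after;                    // 0
    }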
@@ -94,9 +94,9 @@ namespace
 }
 };
-std::array<Task, PT_MAX_CPU_THREADS> tasks;
+std::array<Task, PT_MAX_CPU_THREADS> tasks{};
 Dispatcher& dispatcher = layerData.dispatcher;
-int threads = int(dispatcher.threads());
+auto threads = int(dispatcher.threads());
 for(int taskId = 0; taskId != threads; ++taskId)
 {
@@ -166,8 +166,8 @@ bool Conv2DLayer::apply(LayerData& layerData) const
 Tensor& out = layerData.out;
 out.resize(iw[0] - offsetY, iw[1] - offsetX, ww[0]);
-int threads = int(layerData.dispatcher.threads());
-int threadSize = int(ww[2] * ww[3]) / threads;
+auto threads = int(layerData.dispatcher.threads());
+auto threadSize = int(ww[2] * ww[3]) / threads;
 if(PT_LOOP_UNROLLING_ENABLE && threadSize && threadSize % (Tensor::VectorSize * 2) == 0)
 {
@@ -65,9 +65,9 @@ namespace
 }
 };
-std::array<Task, PT_MAX_CPU_THREADS> tasks;
+std::array<Task, PT_MAX_CPU_THREADS> tasks{};
 Dispatcher& dispatcher = layerData.dispatcher;
-int threads = int(dispatcher.threads());
+auto threads = int(dispatcher.threads());
 for(int taskId = 0; taskId != threads; ++taskId)
 {
@@ -135,8 +135,8 @@ bool DenseLayer::apply(LayerData& layerData) const
 Tensor& out = layerData.out;
 _biases.copyTo(out);
-int threads = int(layerData.dispatcher.threads());
-int threadSize = int(ww[1]) / threads;
+auto threads = int(layerData.dispatcher.threads());
+auto threadSize = int(ww[1]) / threads;
 if(PT_LOOP_UNROLLING_ENABLE && threadSize && threadSize % (Tensor::VectorSize * 2) == 0)
 {
@@ -85,9 +85,9 @@ namespace
 }
 };
-std::array<Task, PT_MAX_CPU_THREADS> tasks;
+std::array<Task, PT_MAX_CPU_THREADS> tasks{};
 Dispatcher& dispatcher = layerData.dispatcher;
-int threads = int(dispatcher.threads());
+auto threads = int(dispatcher.threads());
 for(int taskId = 0; taskId != threads; ++taskId)
 {
@@ -158,8 +158,8 @@ bool LocallyConnected1DLayer::apply(LayerData& layerData) const
 out.resize(ww[0], ww[1]);
-int threads = int(layerData.dispatcher.threads());
-int threadSize = int(ww[2]) / threads;
+auto threads = int(layerData.dispatcher.threads());
+auto threadSize = int(ww[2]) / threads;
 if(PT_LOOP_UNROLLING_ENABLE && threadSize && threadSize % (Tensor::VectorSize * 2) == 0)
 {
@@ -85,9 +85,9 @@ namespace
 }
 };
-std::array<Task, PT_MAX_CPU_THREADS> tasks;
+std::array<Task, PT_MAX_CPU_THREADS> tasks{};
 Dispatcher& dispatcher = layerData.dispatcher;
-int threads = int(dispatcher.threads());
+auto threads = int(dispatcher.threads());
 for(int taskId = 0; taskId != threads; ++taskId)
 {
@@ -138,8 +138,8 @@ bool MaxPooling2DLayer::apply(LayerData& layerData) const
 out.fill(-std::numeric_limits<Tensor::Type>::infinity());
 Dispatcher& dispatcher = layerData.dispatcher;
-int threads = int(dispatcher.threads());
-int threadSize = int(iw[2]) / threads;
+auto threads = int(dispatcher.threads());
+auto threadSize = int(iw[2]) / threads;
 if(PT_LOOP_UNROLLING_ENABLE && threadSize && threadSize % (Tensor::VectorSize * 2) == 0)
 {
@@ -97,7 +97,7 @@ bool Model::predict(Dispatcher& dispatcher, Tensor in, Tensor& out) const
 return false;
 }
-layerData.in = std::move(out);
+layerData.in = std::move(layerData.out);
 }
 if(! _layers[layersCount - 1]->apply(layerData))
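Reading the surrounding hunks, each layer writes its result into layerData.out, so the old line moved from the caller's out parameter, which at that point does not hold the layer's result; the fix forwards the freshly computed layerData.out into the next layer's input. A simplified sketch of the corrected chaining, with guessed-at types and an apply() that merely stands in for a real layer:

    #include <cstddef>
    #include <utility>
    #include <vector>

    struct Tensor { std::vector<float> data; };   // simplified stand-in
    struct LayerData { Tensor in, out; };         // mirrors the fields used in the hunk

    struct Layer
    {
        bool apply(LayerData& layerData) const
        {
            layerData.out = layerData.in;         // placeholder for the layer's real work
            return true;
        }
    };

    bool predict(const std::vector<Layer>& layers, Tensor in, Tensor& out)
    {
        LayerData layerData;
        layerData.in = std::move(in);
        for(std::size_t i = 0; i + 1 < layers.size(); ++i)
        {
            if(! layers[i].apply(layerData))
            {
                return false;
            }
            layerData.in = std::move(layerData.out);   // fixed line: forward the layer's own output
        }
        if(layers.empty() || ! layers.back().apply(layerData))
        {
            return false;
        }
        out = std::move(layerData.out);
        return true;
    }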
@@ -32,9 +32,9 @@ namespace
 void operator()() noexcept
 {
-int its = int(in->getSize());
-int taskIts = its / threads;
-int taskBegin = taskIts * taskId;
+auto its = int(in->getSize());
+auto taskIts = its / threads;
+auto taskBegin = taskIts * taskId;
 int taskEnd;
 if(taskId == threads - 1)
@@ -52,8 +52,8 @@ namespace
 }
 };
-std::array<Task, PT_MAX_CPU_THREADS> tasks;
-int threads = int(dispatcher.threads());
+std::array<Task, PT_MAX_CPU_THREADS> tasks{};
+auto threads = int(dispatcher.threads());
 for(int taskId = 0; taskId != threads; ++taskId)
 {
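These Task functors all carve a flat range of its elements into threads contiguous chunks: taskIts is the chunk length, taskBegin the chunk's start, and the taskId == threads - 1 branch (its body is collapsed here) presumably lets the last task run to the end so a non-divisible size loses nothing. A worked sketch of that partitioning under that assumption:

    #include <cstdio>

    int main()
    {
        const int its = 10;      // total element count, e.g. int(in->getSize())
        const int threads = 4;

        for(int taskId = 0; taskId != threads; ++taskId)
        {
            int taskIts = its / threads;                // 2 elements per chunk
            int taskBegin = taskIts * taskId;
            // Assumption: the last task absorbs the remainder.
            int taskEnd = (taskId == threads - 1) ? its : taskBegin + taskIts;
            std::printf("task %d: [%d, %d)\n", taskId, taskBegin, taskEnd);
        }
        // Prints [0,2) [2,4) [4,6) [6,10): the final task picks up the two leftover elements.
        return 0;
    }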
@@ -77,9 +77,9 @@ namespace
 void operator()() noexcept
 {
-int its = int(in->getSize());
-int taskIts = its / threads;
-int taskBegin = taskIts * taskId;
+auto its = int(in->getSize());
+auto taskIts = its / threads;
+auto taskBegin = taskIts * taskId;
 int taskEnd;
 if(taskId == threads - 1)
@@ -97,8 +97,8 @@ namespace
 }
 };
-std::array<Task, PT_MAX_CPU_THREADS> tasks;
-int threads = int(dispatcher.threads());
+std::array<Task, PT_MAX_CPU_THREADS> tasks{};
+auto threads = int(dispatcher.threads());
 for(int taskId = 0; taskId != threads; ++taskId)
 {
@@ -161,8 +161,8 @@ namespace
 }
 };
-std::array<Task, PT_MAX_CPU_THREADS> tasks;
-int threads = int(dispatcher.threads());
+std::array<Task, PT_MAX_CPU_THREADS> tasks{};
+auto threads = int(dispatcher.threads());
 for(int taskId = 0; taskId != threads; ++taskId)
 {
@@ -187,9 +187,9 @@ namespace
 void operator()() noexcept
 {
-int its = int(in->getSize());
-int taskIts = its / threads;
-int taskBegin = taskIts * taskId;
+auto its = int(in->getSize());
+auto taskIts = its / threads;
+auto taskBegin = taskIts * taskId;
 int taskEnd;
 if(taskId == threads - 1)
@@ -208,8 +208,8 @@ namespace
 }
 };
-std::array<Task, PT_MAX_CPU_THREADS> tasks;
-int threads = int(dispatcher.threads());
+std::array<Task, PT_MAX_CPU_THREADS> tasks{};
+auto threads = int(dispatcher.threads());
 for(int taskId = 0; taskId != threads; ++taskId)
 {
@@ -387,8 +387,8 @@ void Tensor::add(const Tensor& other, Tensor& out, Dispatcher& dispatcher) const
 {
 PT_ASSERT(_dims == other._dims);
-int threads = int(dispatcher.threads());
-int threadSize = int(getSize()) / threads;
+auto threads = int(dispatcher.threads());
+auto threadSize = int(getSize()) / threads;
 copyTo(out);
 if(PT_LOOP_UNROLLING_ENABLE && threadSize && threadSize % (Tensor::VectorSize * 2) == 0)
@@ -410,8 +410,8 @@ void Tensor::multiply(const Tensor& other, Tensor& out, Dispatcher& dispatcher)
 PT_ASSERT(isValid());
 PT_ASSERT(_dims == other._dims);
-int threads = int(dispatcher.threads());
-int threadSize = int(getSize()) / threads;
+auto threads = int(dispatcher.threads());
+auto threadSize = int(getSize()) / threads;
 copyTo(out);
 if(PT_LOOP_UNROLLING_ENABLE && threadSize && threadSize % (Tensor::VectorSize * 2) == 0)
@@ -436,8 +436,8 @@ void Tensor::dot(const Tensor& other, Tensor& out, Dispatcher& dispatcher) const
 out.resize(_dims[0], other._dims[0]);
-int threads = int(dispatcher.threads());
-int threadSize = int(_dims[1]) / threads;
+auto threads = int(dispatcher.threads());
+auto threadSize = int(_dims[1]) / threads;
 if(PT_LOOP_UNROLLING_ENABLE && threadSize && threadSize % (Tensor::VectorSize * 2) == 0)
 {
@@ -458,8 +458,8 @@ void Tensor::fma(const Tensor& scale, const Tensor& bias, Tensor& out, Dispatche
 PT_ASSERT(_dims == scale._dims);
 PT_ASSERT(_dims == bias._dims);
-int threads = int(dispatcher.threads());
-int threadSize = int(getSize()) / threads;
+auto threads = int(dispatcher.threads());
+auto threadSize = int(getSize()) / threads;
 bias.copyTo(out);
 if(PT_LOOP_UNROLLING_ENABLE && threadSize && threadSize % (Tensor::VectorSize * 2) == 0)
@@ -504,7 +504,7 @@ std::ostream& operator<<(std::ostream& stream, const Tensor& tensor)
 {
 const auto& dims = tensor.getDims();
 std::vector<std::size_t> steps(dims.size());
-std::partial_sum(dims.rbegin(), dims.rend(), steps.rbegin(), std::multiplies<std::size_t>());
+std::partial_sum(dims.rbegin(), dims.rend(), steps.rbegin(), std::multiplies<>());
 size_t count = 0;
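The final hunk swaps std::multiplies<std::size_t>() for the transparent std::multiplies<>(), the pattern Clang-Tidy's modernize-use-transparent-functors check suggests: the operand type is deduced per call instead of being hard-coded, with identical results here. For context, that partial_sum over the reversed dims fills steps with the running products used when printing the tensor; a small example with made-up dims:

    #include <cstddef>
    #include <functional>
    #include <numeric>
    #include <vector>

    int main()
    {
        std::vector<std::size_t> dims{2, 3, 4};   // hypothetical tensor dims, outermost first
        std::vector<std::size_t> steps(dims.size());

        // Multiply from the innermost dimension outwards:
        // steps becomes {24, 12, 4}; each entry is the product of its dim and all inner ones.
        std::partial_sum(dims.rbegin(), dims.rend(), steps.rbegin(), std::multiplies<>());

        return int(steps[0]) - 24;                // 0
    }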