mlpack
lp_pooling_impl.hpp
/**
 * @file methods/ann/layer/lp_pooling_impl.hpp
 *
 * Implementation of the LpPooling layer class.
 *
 * mlpack is free software; you may redistribute it and/or modify it under the
 * terms of the 3-clause BSD license.  You should consult the file LICENSE.txt
 * for further information.
 */
#ifndef MLPACK_METHODS_ANN_LAYER_LP_POOLING_IMPL_HPP
#define MLPACK_METHODS_ANN_LAYER_LP_POOLING_IMPL_HPP

// In case it hasn't yet been included.
#include "lp_pooling.hpp"

namespace mlpack {
namespace ann {

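// Create the LpPooling object.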
template<typename InputDataType, typename OutputDataType>
LpPooling<InputDataType, OutputDataType>::LpPooling()
{
  // Nothing to do here.
}

template<typename InputDataType, typename OutputDataType>
LpPooling<InputDataType, OutputDataType>::LpPooling(
    const size_t normType,
    const size_t kernelWidth,
    const size_t kernelHeight,
    const size_t strideWidth,
    const size_t strideHeight,
    const bool floor) :
    normType(normType),
    kernelWidth(kernelWidth),
    kernelHeight(kernelHeight),
    strideWidth(strideWidth),
    strideHeight(strideHeight),
    floor(floor),
    inSize(0),
    outSize(0),
    inputWidth(0),
    inputHeight(0),
    outputWidth(0),
    outputHeight(0),
    reset(false),
    batchSize(0)
{
  // Nothing to do here.
}

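// Ordinary feed forward pass of a neural network, evaluating the function
// f(x) by propagating the activity forward through f.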
template<typename InputDataType, typename OutputDataType>
template<typename eT>
void LpPooling<InputDataType, OutputDataType>::Forward(
    const arma::Mat<eT>& input, arma::Mat<eT>& output)
{
  batchSize = input.n_cols;
  inSize = input.n_elem / (inputWidth * inputHeight * batchSize);
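  // Alias the flattened input columns as a cube with one
  // inputWidth x inputHeight slice per channel and batch item (no copy).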
  inputTemp = arma::cube(const_cast<arma::Mat<eT>&>(input).memptr(),
      inputWidth, inputHeight, batchSize * inSize, false, false);

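  // Compute the spatial output dimensions; `floor` chooses whether a partial
  // kernel window at the border is dropped (floor) or kept (ceil).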
  if (floor)
  {
    outputWidth = std::floor((inputWidth -
        (double) kernelWidth) / (double) strideWidth + 1);
    outputHeight = std::floor((inputHeight -
        (double) kernelHeight) / (double) strideHeight + 1);
  }
  else
  {
    outputWidth = std::ceil((inputWidth -
        (double) kernelWidth) / (double) strideWidth + 1);
    outputHeight = std::ceil((inputHeight -
        (double) kernelHeight) / (double) strideHeight + 1);
  }

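  // Pool every slice independently; Pooling() (defined in lp_pooling.hpp)
  // reduces each kernel window to a single value using the Lp norm with
  // p = normType.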
  outputTemp = arma::zeros<arma::Cube<eT> >(outputWidth, outputHeight,
      batchSize * inSize);

  for (size_t s = 0; s < inputTemp.n_slices; s++)
    Pooling(inputTemp.slice(s), outputTemp.slice(s));

  output = arma::Mat<eT>(outputTemp.memptr(), outputTemp.n_elem / batchSize,
      batchSize);

  outputWidth = outputTemp.n_rows;
  outputHeight = outputTemp.n_cols;
  outSize = batchSize * inSize;
}

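// Ordinary feed backward pass of a neural network, using 3rd-order tensors as
// input, calculating the function f(x) by propagating x backwards through f.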
template<typename InputDataType, typename OutputDataType>
template<typename eT>
void LpPooling<InputDataType, OutputDataType>::Backward(
    const arma::Mat<eT>& /* input */,
    const arma::Mat<eT>& gy,
    arma::Mat<eT>& g)
{
  arma::cube mappedError = arma::cube(((arma::Mat<eT>&) gy).memptr(),
      outputWidth, outputHeight, outSize, false, false);

  gTemp = arma::zeros<arma::cube>(inputTemp.n_rows,
      inputTemp.n_cols, inputTemp.n_slices);

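  // Unpooling() (defined in lp_pooling.hpp) routes each pooled output's error
  // back over the kernel window it came from.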
  for (size_t s = 0; s < mappedError.n_slices; s++)
  {
    Unpooling(inputTemp.slice(s), mappedError.slice(s), gTemp.slice(s));
  }

  g = arma::mat(gTemp.memptr(), gTemp.n_elem / batchSize, batchSize);
}

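// Serialize the layer.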
template<typename InputDataType, typename OutputDataType>
template<typename Archive>
void LpPooling<InputDataType, OutputDataType>::serialize(
    Archive& ar,
    const uint32_t /* version */)
{
  ar(CEREAL_NVP(normType));
  ar(CEREAL_NVP(kernelWidth));
  ar(CEREAL_NVP(kernelHeight));
  ar(CEREAL_NVP(strideWidth));
  ar(CEREAL_NVP(strideHeight));
  ar(CEREAL_NVP(batchSize));
  ar(CEREAL_NVP(floor));
  ar(CEREAL_NVP(inputWidth));
  ar(CEREAL_NVP(inputHeight));
  ar(CEREAL_NVP(outputWidth));
  ar(CEREAL_NVP(outputHeight));
}

} // namespace ann
} // namespace mlpack

#endif
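Below is a minimal usage sketch (not part of this file), assuming an mlpack 3.x build whose layer list includes LpPooling; the layer and input sizes are illustrative only.

#include <mlpack/methods/ann/ffn.hpp>
#include <mlpack/methods/ann/layer/layer.hpp>

using namespace mlpack::ann;

int main()
{
  // Default output layer (NegativeLogLikelihood) and initialization rule.
  FFN<> model;

  // Convolution producing 8 feature maps from a 28x28 single-channel image;
  // with a 3x3 kernel, stride 1 and no padding the maps are 26x26.
  model.Add<Convolution<>>(1, 8, 3, 3, 1, 1, 0, 0, 28, 28);

  // L2 pooling over 2x2 windows with stride 2; `true` rounds the output
  // dimensions down (floor), giving 13x13 maps here.
  model.Add<LpPooling<>>(2 /* normType */, 2, 2, 2, 2, true);

  // Map the pooled activations (8 * 13 * 13 = 1352) to 10 class scores.
  model.Add<Linear<>>(1352, 10);
  model.Add<LogSoftMax<>>();
}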