// This header requires C++11 (std::function, std::unique_ptr)
#if (__cplusplus >= 201103L)
#ifndef NEURALNETWORKCOMPUTE_H
#define NEURALNETWORKCOMPUTE_H

#include <vector>
#include <functional>
#include <string>
#include <cmath>
#include <memory>
#include <map>

#ifdef LEPTON
#include "Lepton.h"
#endif

namespace neuralnetworkCV {
/// Map from the name of an activation function to the pair
/// (function, derivative)
extern std::map<std::string, std::pair<std::function<double(double)>, std::function<double(double)>>> activation_function_map;
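
// Lookup sketch (illustrative only; assumes "tanh" is one of the registered
// keys, which are defined in the corresponding implementation file):
//
//   const auto& f_df = activation_function_map.at("tanh");
//   const double y  = f_df.first(0.5);   // tanh(0.5)
//   const double dy = f_df.second(0.5);  // d/dx tanh(x) at x = 0.5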

#ifdef LEPTON
/// Custom activation function whose expression is parsed and compiled by Lepton
class customActivationFunction {
public:
    /// Empty constructor
    customActivationFunction();
    /// Construct from a mathematical expression string
    customActivationFunction(const std::string& expression_string);
    /// Copy constructor
    customActivationFunction(const customActivationFunction& source);
    /// Assignment operator
    customActivationFunction& operator=(const customActivationFunction& source);
    /// Set the expression of the activation function
    void setExpression(const std::string& expression_string);
    /// Get the expression of the activation function
    std::string getExpression() const;
    /// Evaluate the function at x
    double evaluate(double x) const;
    /// Evaluate the derivative of the function at x
    double derivative(double x) const;
private:
    /// The expression string
    std::string expression;
    /// Lepton-compiled evaluators of the function and of its derivative
    std::unique_ptr<Lepton::CompiledExpression> value_evaluator;
    std::unique_ptr<Lepton::CompiledExpression> gradient_evaluator;
    /// References to the input variables of the compiled expressions
    double* input_reference;
    double* derivative_reference;
};
#endif
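
// Usage sketch (illustrative only; assumes the code was built with Lepton
// support, i.e. -DLEPTON, and that the expression uses "x" as its variable):
//
//   customActivationFunction sigmoid("1/(1+exp(-x))");
//   const double y  = sigmoid.evaluate(0.5);    // value at x = 0.5
//   const double dy = sigmoid.derivative(0.5);  // slope at x = 0.5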

/// A fully-connected (dense) layer: output = f(W * input + b)
class denseLayer {
private:
    /// Input size of this layer
    size_t m_input_size;
    /// Output size of this layer
    size_t m_output_size;
    /// Activation function, applied elementwise
    std::function<double(double)> m_activation_function;
    /// Derivative of the activation function
    std::function<double(double)> m_activation_function_derivative;
#ifdef LEPTON
    /// Whether a Lepton-compiled custom activation function is in use
    bool m_use_custom_activation;
    customActivationFunction m_custom_activation_function;
#else
    static const bool m_use_custom_activation = false;
#endif
    /// m_weights[i][j] connects the j-th input to the i-th output
    std::vector<std::vector<double>> m_weights;
    /// m_biases[i] is the bias of the i-th output node
    std::vector<double> m_biases;
public:
    /// Empty constructor
    denseLayer() {}
    /// Construct a layer from a weights file, a biases file, an activation
    /// function f and its derivative df
    denseLayer(const std::string& weights_file, const std::string& biases_file, const std::function<double(double)>& f, const std::function<double(double)>& df);
#ifdef LEPTON
    /// Construct a layer from a weights file, a biases file and the
    /// expression string of a custom (Lepton-compiled) activation function
    denseLayer(const std::string& weights_file, const std::string& biases_file, const std::string& custom_activation_expression);
#endif

    /// Read the weights and biases from text files
    void readFromFile(const std::string& weights_file, const std::string& biases_file);
    /// Set the activation function f and its derivative df
    void setActivationFunction(const std::function<double(double)>& f, const std::function<double(double)>& df);
    /// Compute the output of this layer for a given input
    void compute(const std::vector<double>& input, std::vector<double>& output) const;
    /// Gradient of the i-th output with respect to the j-th input
    double computeGradientElement(const std::vector<double>& input, const size_t i, const size_t j) const;
    /// Fill output_grad so that output_grad[i][j] is the gradient of the
    /// i-th output with respect to the j-th input
    void computeGradient(const std::vector<double>& input, std::vector<std::vector<double>>& output_grad) const;
    /// Input size of this layer
    size_t getInputSize() const {
        return m_input_size;
    }
    /// Output size of this layer
    size_t getOutputSize() const {
        return m_output_size;
    }
    /// Weight connecting the j-th input to the i-th output
    double getWeight(size_t i, size_t j) const {
        return m_weights[i][j];
    }
    /// Bias of the i-th output node
    double getBias(size_t i) const {
        return m_biases[i];
    }
    ~denseLayer() {}
};
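
// Usage sketch (illustrative only; the file names are hypothetical, and the
// files are assumed to follow the plain-text format expected by readFromFile):
//
//   denseLayer layer("dense1_weights.txt", "dense1_biases.txt",
//                    [](double x){ return std::tanh(x); },
//                    [](double x){ const double t = std::tanh(x); return 1.0 - t * t; });
//   std::vector<double> in(layer.getInputSize(), 0.0);
//   std::vector<double> out(layer.getOutputSize(), 0.0);
//   layer.compute(in, out);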

/// A feed-forward network of dense layers, evaluated together with the
/// gradients of its outputs with respect to its inputs (chain rule)
class neuralNetworkCompute {
private:
    /// The dense layers, applied in order
    std::vector<denseLayer> m_dense_layers;
    /// Input vector of the whole network
    std::vector<double> m_input;
    /// Output of each layer
    std::vector<std::vector<double>> m_layers_output;
    /// Per-layer gradient matrices (scratch space)
    std::vector<std::vector<std::vector<double>>> m_grads_tmp;
    /// m_chained_grad[i][j] is the gradient of the i-th network output
    /// with respect to the j-th network input
    std::vector<std::vector<double>> m_chained_grad;
private:
    /// Matrix product A * B, used to chain per-layer gradients
    static std::vector<std::vector<double>> multiply_matrix(const std::vector<std::vector<double>>& A, const std::vector<std::vector<double>>& B);
public:
    /// Empty constructor
    neuralNetworkCompute(): m_dense_layers(0), m_layers_output(0) {}
    /// Construct from an existing set of layers
    neuralNetworkCompute(const std::vector<denseLayer>& dense_layers);
    /// Append a dense layer to the network
    bool addDenseLayer(const denseLayer& layer);
    /// Read-only and writable access to the input vector
    const std::vector<double>& input() const {return m_input;}
    std::vector<double>& input() {return m_input;}
    /// Run the network forward from m_input and update the outputs and gradients
    void compute();
    /// i-th component of the output of the last layer
    double getOutput(const size_t i) const {return m_layers_output.back()[i];}
    /// Gradient of the i-th output with respect to the j-th input
    double getGradient(const size_t i, const size_t j) const {return m_chained_grad[i][j];}
    /// Access the i-th layer
    const denseLayer& getLayer(const size_t i) const {return m_dense_layers[i];}
    /// Number of layers
    size_t getNumberOfLayers() const {return m_dense_layers.size();}
};
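
// Usage sketch (illustrative only; "layer1" and "layer2" are hypothetical
// denseLayer objects whose sizes are assumed to be compatible):
//
//   neuralNetworkCompute nn;
//   nn.addDenseLayer(layer1);
//   nn.addDenseLayer(layer2);
//   nn.input() = {0.1, 0.2};       // must match the first layer's input size
//   nn.compute();
//   const double value  = nn.getOutput(0);
//   const double dv_dx0 = nn.getGradient(0, 0); // d(output 0)/d(input 0)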

} // namespace neuralnetworkCV
#endif // NEURALNETWORKCOMPUTE_H
#endif // (__cplusplus >= 201103L)