티스토리 뷰
//##Utils.h
// NOTE(review): guard renamed from _UTILS_H_ — identifiers beginning with an
// underscore followed by an uppercase letter are reserved for the implementation.
#ifndef ANN_UTILS_H
#define ANN_UTILS_H

#include <cstdlib>

namespace ann
{
    /// Small collection of static helpers shared by the ANN sources.
    class Utils
    {
    public:
        /// Deletes a single heap object and nulls the caller's pointer.
        /// Safe to call with nullptr. Always returns true.
        template<typename T>
        static bool safeDelete(T*& target)
        {
            if (target == nullptr)
            {
                return true;
            }
            delete target;
            target = nullptr;
            return true;
        }

        /// Deletes a heap array and nulls the caller's pointer.
        /// Safe to call with nullptr. Always returns true.
        template<typename T>
        static bool safeDeleteArray(T*& target)
        {
            if (target == nullptr)
            {
                return true;
            }
            delete[] target;
            target = nullptr;
            return true;
        }

        /// Pseudo-random float in roughly [-1, 1], based on rand().
        /// Callers are expected to seed with srand() first.
        static float random()
        {
            return -1.f + rand() / float(RAND_MAX / 2);
        }
    };
}

#endif
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 | //##NeuralNetwork.h #ifndef _NEURAL_NETWORK_H_ #define _NEURAL_NETWORK_H_ #include"Utils.h" #include <iostream> #include<vector> namespace ann { class Layer; class Neuron { private: using result_t = float; using dendrite_t = float; result_t Result; float Delta; size_t NumDendrites; dendrite_t* Dendrites; public: Neuron() : Result(0.f), Delta(0.f), NumDendrites(0), Dendrites(nullptr) { } ~Neuron() { Utils::safeDeleteArray(Dendrites); } result_t getResult() const { return Result; } float getDelta() const {return Delta;} float getDendrite(size_t index) const {return Dendrites[index];} bool init(size_t numDendrites, float result = 0.f); void simulate(const float* inputs); void train(const float* inputs, float learningRate, float target); void train(const Layer& postLayer, const float* inputs, float learningRate); }; class Layer { private: size_t NumNeurons; Neuron* Neurons; public: Layer() : NumNeurons(0), Neurons(nullptr) {} size_t getNumNeurons() const {return NumNeurons;} const Neuron& getNeuron(size_t index) const {return Neurons[index];} std::vector<float> getResults() const; bool init(size_t numNeurons); void simulate(float* outputs, const float* inputs); void train(const float* targets, const float* inputs, float learningRate); void train(const Layer& postLayer, const float* inputs, float learningRate); }; class NeuralNetwork { private: size_t NumLayers; Layer* Layers; public: NeuralNetwork(): NumLayers(0), Layers(nullptr) {} bool init(size_t numLayers, size_t numNeurons); void simulate(float* outputs, const float* inputs, size_t size); void train(const float* inputs, const float* targets, float learningRate); }; } #endif | cs |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 | //##NeuralNetwork.cpp #include"NeuralNetwork.h" bool ann::Neuron::init(size_t numDendrites, float result) { Result = result; NumDendrites = numDendrites; Dendrites = new dendrite_t[NumDendrites + 1]; for(size_t i=0; i <= NumDendrites; i ++) { Dendrites[i] = ann::Utils::random(); } return true; } void ann::Neuron::simulate(const float * inputs) { Result = 0.f; for (size_t i = 0; i <= NumDendrites; i++) { Result += Dendrites[i] * inputs[i]; } Result = (Result > 0.f) ? Result : (0.1f * Result); } void ann::Neuron::train(const float* inputs, float learningRate, float target) { Delta = (target - Result) * ((Result > 0.f) ? 1.f : 0.1f); for(size_t i = 0; i <= NumDendrites; i ++) { Dendrites[i] += Delta * learningRate * inputs[i]; } } void ann::Neuron::train(const Layer & postLayer, const float * inputs, float learningRate) { Delta = 0.f; for(size_t i = 0; i < postLayer.getNumNeurons(); i ++) { const Neuron& curNeuron = postLayer.getNeuron(i); Delta += curNeuron.getDelta() * curNeuron.getDendrite(i); } for (size_t i = 0; i <= NumDendrites; i++) { Dendrites[i] += Delta * ((Result > 0.f) ? 
1.f : 0.1f) * learningRate; } } std::vector<float> ann::Layer::getResults() const { std::vector<float> results(NumNeurons + 1); for(size_t i = 0; i <= NumNeurons; i ++) { results[i] = Neurons[i].getResult(); } return results; } bool ann::Layer::init(size_t numNeurons) { NumNeurons = numNeurons; Neurons = new Neuron[NumNeurons + 1]; for(size_t i=0; i < NumNeurons; i ++) { if(!Neurons[i].init(NumNeurons)) { return false; } } return Neurons[NumNeurons].init(0, 1.f); } void ann::Layer::simulate(float* outputs, const float* inputs) { for(size_t i=0; i<NumNeurons; i ++) { Neurons[i].simulate(inputs); } for(size_t i = 0; i <= NumNeurons; i ++) { outputs[i] = Neurons[i].getResult(); } } void ann::Layer::train(const float* targets, const float* inputs, float learningRate) { for(size_t i = 0; i < NumNeurons; i ++) { Neurons[i].train(inputs, learningRate, targets[i]); } } void ann::Layer::train(const Layer& postLayer, const float * inputs, float learningRate) { for (size_t i = 0; i < NumNeurons; i++) { Neurons[i].train(postLayer, inputs, learningRate); } } bool ann::NeuralNetwork::init(size_t numLayers, size_t numNeurons) { NumLayers = numLayers; Layers = new Layer[NumLayers]; for (size_t i = 0; i < NumLayers; i ++) { if (!Layers[i].init(numNeurons)) { return false; } } return true; } void ann::NeuralNetwork::simulate(float* outputs, const float* inputs, size_t size) { const size_t bufferSize = sizeof(float) * size; float* _inputs = new float[size + 1]; memcpy(_inputs, inputs, bufferSize); _inputs[size] = 1.f; for(size_t i=0; i < NumLayers; i ++) { Layers[i].simulate(_inputs, _inputs); } memcpy(outputs, _inputs, bufferSize); Utils::safeDeleteArray(_inputs); } void ann::NeuralNetwork::train(const float* inputs, const float* targets, float learningRate) { std::vector<float> _inputs(Layers[0].getNumNeurons() + 1); memcpy(&_inputs.front(), inputs, sizeof(float) * Layers[0].getNumNeurons()); _inputs[Layers[0].getNumNeurons()] = 1.f; for(size_t i = 0; i < NumLayers - 1; i ++) { 
Layers[i].train(Layers[i + 1], &_inputs.front(), learningRate); _inputs = Layers[i].getResults(); } Layers[NumLayers - 1].train(targets, &_inputs.front(), learningRate); } | cs |
asd
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 | //##main.cpp #include "NeuralNetwork.h" #include <time.h> int main() { srand(unsigned int(time(NULL))); const size_t neurons = 10; ann::NeuralNetwork neuralnetwork; neuralnetwork.init(5, neurons); float inputs[neurons], outputs[neurons], targets[neurons]; for(int i = 0; i < neurons; i ++) { inputs[i] = fabsf(ann::Utils::random()); targets[i] = fabsf(ann::Utils::random()); std::cout << "i: " << inputs[i] << ", " << targets[i] << std::endl; } neuralnetwork.simulate(outputs, inputs, neurons); for(int i = 0; i < neurons; i ++) { std::cout << "o: " << outputs[i] << std::endl; } std::cout << std::endl; for(size_t i = 0; i < 100000; i ++) { neuralnetwork.train(inputs, targets, 0.01f); neuralnetwork.simulate(outputs, inputs, neurons); } for (int i = 0; i < neurons; i++) { std::cout << "o: " << outputs[i] << std::endl; } system("pause"); return 0; } | cs |
asdf
학습 횟수를 높이면 output이 NaN이 나오네요
실수 정확도 문제 아니면 논리적 오류가 있는 듯
※ 코딩 지적 및 비판은 저에게 많은 도움이 됩니다.
※ 코딩 질문 또한 많은 도움이 됩니다.
undefined
공지사항
최근에 올라온 글
최근에 달린 댓글
- Total
- Today
- Yesterday
링크
TAG
- 표창 키우기
- DirectX
- PlayerPrefs
- 프레임워크
- ObjectPooling
- DirectX9
- ObjectPool
- 기능경기
- 파일저장
- Save
- 시간 능력
- STL
- 몬티홀 딜레마
- 직렬화
- C++
- c#
- 노맨즈 스카이
- 저장
- 3D
- 기경
- Serialization
- 오브젝트풀
- 불러오기
- UI
- Tap-Titans
- 2048 코드
- load
- 프로그래밍
- Unity
- 게임
일 | 월 | 화 | 수 | 목 | 금 | 토 |
---|---|---|---|---|---|---|
1 | 2 | 3 | 4 | 5 | ||
6 | 7 | 8 | 9 | 10 | 11 | 12 |
13 | 14 | 15 | 16 | 17 | 18 | 19 |
20 | 21 | 22 | 23 | 24 | 25 | 26 |
27 | 28 | 29 | 30 |
글 보관함