# Neural network: Input -> L1 -> L2 -> Output
# Optimization algorithm: BFGS
import matplotlib.pyplot as plt
import numpy as np
from scipy import optimize
from scipy.interpolate import Rbf
from random import randint
# Training examples: two features per row, with a binary label for each
X = np.array(([3, 5], [5, 1], [10, 2], [1, 2], [45, 51], [543, 12], [833, 920], [120, 95]), dtype=float)
y = np.array(([0], [1], [1], [0], [0], [1], [0], [1]), dtype=float)
# Normalize each feature to [0, 1] by its column maximum
# (Xmax is kept so that later query points can be scaled the same way)
Xmax = np.amax(X, axis=0)
X = X/Xmax
class Neural_Network(object):
    def __init__(self):
        self.inputLayerSize = 2
        self.outputLayerSize = 1
        self.hiddenLayers = [2, 2]
        # Weights: Input -> L1 -> L2 -> Output
        self.W1 = np.random.randn(self.inputLayerSize, self.hiddenLayers[0])
        self.W2 = np.random.randn(self.hiddenLayers[0], self.hiddenLayers[1])
        self.W3 = np.random.randn(self.hiddenLayers[1], self.outputLayerSize)

    def sigmoid(self, z):
        return 1/(1+np.exp(-z))

    def sigmoidDerivative(self, z):
        # Derivative of the sigmoid with respect to z
        return np.exp(-z)/((1+np.exp(-z))**2)

    def forward(self, X):
        # Propagate the inputs through the two hidden layers to the output
        self.z2 = np.dot(X, self.W1)
        self.activation2 = self.sigmoid(self.z2)
        self.z3 = np.dot(self.activation2, self.W2)
        self.activation3 = self.sigmoid(self.z3)
        self.z4 = np.dot(self.activation3, self.W3)
        yHat = self.sigmoid(self.z4)
        return yHat

    def costFunction(self, X, y):
        self.yHat = self.forward(X)
        # Quadratic cost (np.sum so that J is a scalar, as the optimizer expects)
        J = 0.5*np.sum((y-self.yHat)**2)
        return J
    def getParams(self):
        # Flatten all weight matrices into a single parameter vector
        params = np.concatenate((self.W1.ravel(), self.W2.ravel(), self.W3.ravel()))
        return params

    def setParams(self, params):
        # Unflatten the parameter vector back into the three weight matrices
        W1_start = 0
        W1_end = self.hiddenLayers[0] * self.inputLayerSize
        self.W1 = np.reshape(params[W1_start:W1_end], (self.inputLayerSize, self.hiddenLayers[0]))
        W2_end = W1_end + self.hiddenLayers[0]*self.hiddenLayers[1]
        self.W2 = np.reshape(params[W1_end:W2_end], (self.hiddenLayers[0], self.hiddenLayers[1]))
        W3_end = W2_end + self.hiddenLayers[1]*self.outputLayerSize
        self.W3 = np.reshape(params[W2_end:W3_end], (self.hiddenLayers[1], self.outputLayerSize))
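
    def costFunctionDerivative(self, X, y):
        # Backpropagation of the quadratic cost, needed by computeGradients
        # below. This is the standard derivation for a sigmoid network with
        # J = 0.5*sum((y - yHat)**2); the exact original body is assumed.
        self.yHat = self.forward(X)
        delta4 = np.multiply(-(y-self.yHat), self.sigmoidDerivative(self.z4))
        dJdW3 = np.dot(self.activation3.T, delta4)
        delta3 = np.dot(delta4, self.W3.T)*self.sigmoidDerivative(self.z3)
        dJdW2 = np.dot(self.activation2.T, delta3)
        delta2 = np.dot(delta3, self.W2.T)*self.sigmoidDerivative(self.z2)
        dJdW1 = np.dot(X.T, delta2)
        return dJdW1, dJdW2, dJdW3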
    def computeGradients(self, X, y):
        # Gradient of the cost with respect to all parameters, flattened
        dJdW1, dJdW2, dJdW3 = self.costFunctionDerivative(X, y)
        return np.concatenate((dJdW1.ravel(), dJdW2.ravel(), dJdW3.ravel()))
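
# Optional sanity check, a small illustrative sketch (the helper name
# checkGradients is not part of the script above): compare the backpropagation
# gradients against scipy's numerical estimate. check_grad returns the 2-norm
# of the difference, which should be tiny if costFunctionDerivative is correct.
def checkGradients(net, X, y):
    def f(params):
        net.setParams(params)
        return net.costFunction(X, y)
    def g(params):
        net.setParams(params)
        return net.computeGradients(X, y)
    return optimize.check_grad(f, g, net.getParams())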
class trainer(object):
    def __init__(self, N):
        self.N = N

    def callbackF(self, params):
        # Record the cost after each BFGS iteration
        self.N.setParams(params)
        self.J.append(self.N.costFunction(self.X, self.y))

    def costFunctionWrapper(self, params, X, y):
        # scipy expects a function of the parameter vector returning (cost, gradient)
        self.N.setParams(params)
        cost = self.N.costFunction(X, y)
        grad = self.N.computeGradients(X, y)
        return cost, grad

    def train(self, X, y):
        self.X = X
        self.y = y
        self.J = []
        params0 = self.N.getParams()
        options = {'maxiter': 200, 'disp': True}
        _res = optimize.minimize(self.costFunctionWrapper, params0, jac=True, method='BFGS',
                                 args=(X, y), options=options, callback=self.callbackF)
        self.N.setParams(_res.x)
        self.optimizationResults = _res
# Train the network and display the results
NN = Neural_Network()
T = trainer(NN)
T.train(X,y)
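# Optional: the callback stored the cost after each BFGS iteration in T.J,
# so the training curve can be plotted before the decision surface
# (a small illustrative addition; the labels are assumptions):
plt.figure()
plt.plot(T.J)
plt.xlabel('BFGS iteration')
plt.ylabel('Cost J')
plt.title('Training curve')
plt.show()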
# Visualize the learned decision surface: evaluate the network on random
# points, then interpolate them onto a regular grid with radial basis functions
result = []
xa = [randint(0, 10**10) for i in range(0, 1500)]
ya = [randint(0, 10**10) for i in range(0, 1500)]
for i in range(0, 1500):
    # Scale each query point by the training maxima, like the training set
    result.append(NN.forward(np.array([xa[i], ya[i]])/Xmax)[0])
xi, yi = np.linspace(0, 10**10, 150), np.linspace(0, 10**10, 150)
xi, yi = np.meshgrid(xi, yi)
rbf = Rbf(xa, ya, result, function='linear')
zi = rbf(xi, yi)
plt.title('Neural network')
plt.imshow(zi, vmin=np.array(result).min(), vmax=np.array(result).max(), origin='lower',
           extent=[np.array(xa).min(), np.array(xa).max(), np.array(ya).min(), np.array(ya).max()])
plt.colorbar()
plt.show()