查看: 800|回复: 2
|
求python大神帮助!!急急急
[复制链接]
|
|
以下是 single output neuron perceptron 的 source code…… 想请大神们帮帮忙把它 modify 成 two output neuron perceptron，that can discriminate training examples from four class labels。Training 的 example 如下:
import numpy as np
class Perceptron(object):
    """Single-output-neuron perceptron classifier.

    Attributes:
        learning_rate (float): step size used in the weight-update rule.
        epoch (int): maximum number of passes over the training set.
        weights (np.ndarray): weight vector; weights[0] is the bias term.
        errors (list[int]): number of misclassified samples per epoch.
    """

    def __init__(self, learning_rate=0.01, epoch=10):
        self.learning_rate = learning_rate
        self.epoch = epoch

    def training(self, X, y):
        """Fit the perceptron.

        Args:
            X: array of shape (n_samples, n_features) with the input samples.
            y: array of shape (n_samples,) with target labels (+1 / -1).

        Returns:
            self, so calls can be chained.
        """
        # Initialise the weights randomly in [0, 1); slot 0 is the bias.
        self.weights = np.random.rand(1 + X.shape[1])
        self.errors = []
        no_of_samples = X.shape[0]
        for i in range(self.epoch):
            no_errors = 0
            for j in range(no_of_samples):
                target = y[j]
                xj = X[j]
                predicted = self.predict(xj)
                diff = target - predicted  # 0 when correct, +/-2 when wrong
                # Perceptron learning rule: w <- w + lr * (target - predicted) * x
                update = self.learning_rate * diff
                self.weights[1:] += update * xj
                self.weights[0] += update  # bias gets the bare update
                if diff != 0.0:
                    no_errors += 1
            self.errors.append(no_errors)
            # Spaces added so the report reads "epoch # 3 no of error = 1".
            print("epoch # " + str(i) + " no of error = " + str(no_errors) + "\n")
        return self

    def predict(self, X):
        """Return the predicted class label, +1 or -1, for input X."""
        sum_of_input = self.net_input(X)
        if sum_of_input >= 0:
            return 1
        else:
            return -1

    def net_input(self, X):
        """Weighted sum of the inputs plus the bias (np.dot is the inner product)."""
        return np.dot(X, self.weights[1:]) + self.weights[0]

    def model(self):
        """Print the learned weight values; the first printed value is the bias."""
        print("Weight value of NN *the first is the bias.\n")
        for val in self.weights:
            print(str(val) + "\n")
# --- Demo: train the perceptron on the logical-AND truth table ---
# Each row: input 1, input 2, target label (+1 for true, -1 for false).
logicAnd = np.array([
    [1, 1, 1],
    [1, 0, -1],
    [0, 1, -1],
    [0, 0, -1],
])

# Split the table into inputs and targets, then train.
andgate = Perceptron(0.1, epoch=10)
X = np.array(logicAnd[:, 0:2])
y = np.array(logicAnd[:, 2])
andgate.training(X, y)  # pass in inputs and targets
andgate.model()

# Test the trained network on every training sample.
for row in range(4):
    print(andgate.predict(np.array(X[row])))
|
|
|
|
|
|
|
|
发表于 5-11-2017 01:40 AM
|
显示全部楼层
Training examples (input features → target outputs):

| Input 1 | Input 2 | Output 1 | Output 2 |
|---------|---------|----------|----------|
| 1       | 1       | -1       | -1       |
| 1       | 2       | -1       | -1       |
| 2       | -1      | -1       | 1        |
| 2       | 0       | -1       | 1        |
| -1      | 2       | 1        | -1       |
| -2      | 1       | 1        | -1       |
| -1      | -1      | 1        | 1        |
| -2      | -2      | 1        | 1        |
import numpy as np
class Perceptron(object):
    """Single-output-neuron perceptron classifier.

    Attributes:
        learning_rate (float): step size used in the weight-update rule.
        epoch (int): maximum number of passes over the training set.
        weights (np.ndarray): weight vector; weights[0] is the bias term.
        errors (list[int]): number of misclassified samples per epoch.
    """

    def __init__(self, learning_rate=0.01, epoch=10):
        self.learning_rate = learning_rate
        self.epoch = epoch

    def training(self, X, y):
        """Fit the perceptron.

        Args:
            X: array of shape (n_samples, n_features) with the input samples.
            y: array of shape (n_samples,) with target labels (+1 / -1).

        Returns:
            self, so calls can be chained.
        """
        # Initialise the weights randomly in [0, 1); slot 0 is the bias.
        self.weights = np.random.rand(1 + X.shape[1])
        self.errors = []
        no_of_samples = X.shape[0]
        for i in range(self.epoch):
            no_errors = 0
            for j in range(no_of_samples):
                target = y[j]
                xj = X[j]
                predicted = self.predict(xj)
                diff = target - predicted  # 0 when correct, +/-2 when wrong
                # Perceptron learning rule: w <- w + lr * (target - predicted) * x
                update = self.learning_rate * diff
                self.weights[1:] += update * xj
                self.weights[0] += update  # bias gets the bare update
                if diff != 0.0:
                    no_errors += 1
            self.errors.append(no_errors)
            # Spaces added so the report reads "epoch # 3 no of error = 1".
            print("epoch # " + str(i) + " no of error = " + str(no_errors) + "\n")
        return self

    def predict(self, X):
        """Return the predicted class label, +1 or -1, for input X."""
        sum_of_input = self.net_input(X)
        if sum_of_input >= 0:
            return 1
        else:
            return -1

    def net_input(self, X):
        """Weighted sum of the inputs plus the bias (np.dot is the inner product)."""
        return np.dot(X, self.weights[1:]) + self.weights[0]

    def model(self):
        """Print the learned weight values; the first printed value is the bias."""
        print("Weight value of NN *the first is the bias.\n")
        for val in self.weights:
            print(str(val) + "\n")
# --- Demo: train the perceptron on the logical-AND truth table ---
# Each row: input 1, input 2, target label (+1 for true, -1 for false).
logicAnd = np.array([
    [1, 1, 1],
    [1, 0, -1],
    [0, 1, -1],
    [0, 0, -1],
])

# Split the table into inputs and targets, then train.
andgate = Perceptron(0.1, epoch=10)
X = np.array(logicAnd[:, 0:2])
y = np.array(logicAnd[:, 2])
andgate.training(X, y)  # pass in inputs and targets
andgate.model()

# Test the trained network on every training sample.
for row in range(4):
    print(andgate.predict(np.array(X[row])))
|
|
|
|
|
|
|
发表于 20-11-2017 10:47 AM
|
显示全部楼层
|
|
|
|
|
|
| |
本周最热论坛帖子
|