#!/usr/bin/env python3

# Direction flags for bump_weights(): True pushes weights up, False pushes them down.
down = False
up = True

def dp(inputs, weights):
    """Return the dot product of the inputs and the weights."""
    return sum(i * w for i, w in zip(inputs, weights))
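
# For illustration (values chosen arbitrarily): dp([1, 0, 1], [2, 5, 3]) multiplies
# the vectors element-wise and sums the results, i.e. 1*2 + 0*5 + 1*3 = 5.
assert dp([1, 0, 1], [2, 5, 3]) == 5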

# Hyperparameters and model state: one weight per input, all starting at 0 and
# updated in place as training proceeds.
learning_rate = 0.1
threshold = 0.5
weights = [0, 0, 0]

def co(inputs):
    """Classifier output: 1 if the weighted sum of the inputs exceeds the threshold, else 0."""
    print("co")
    prod = dp(inputs, weights)
    print("dot_product: " + str(prod))
    if prod > threshold:
        return 1
    else:
        return 0
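
# For illustration: with the initial weights [0, 0, 0] defined above, co([1, 0, 0])
# gets a dot product of 0, which is not above the 0.5 threshold, so it returns 0.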

# Iterate over the inputs and bump the corresponding weights if the input was 1.
def bump_weights(inputs, up_or_down):
    print("bump_weights")
    for x, val in enumerate(inputs):
        if val == 1:
            if up_or_down:
                weights[x] += learning_rate
            else:
                weights[x] -= learning_rate
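
# For illustration: starting from weights [0, 0, 0], bump_weights([1, 0, 1], up) adds
# learning_rate (0.1) to weights[0] and weights[2] only (the inputs that are 1),
# giving [0.1, 0, 0.1]; bump_weights([1, 0, 1], down) subtracts 0.1 from the same slots.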

# Each dataset is [bias, a, b, expected_output]: the first input is fixed at 1 and
# the expected outputs are NAND of a and b.
datasets = [[1, 0, 0, 1], [1, 0, 1, 1], [1, 1, 0, 1], [1, 1, 1, 0]]
# weights remains a global variable, updated in place by bump_weights()

# Training loop: keep cycling over the datasets until a full pass classifies
# every pattern correctly (i.e. no weight updates were needed during the pass).
count = 0
correct = [False, False, False, False]
while count < len(datasets):
    dataset = datasets[count]
    inputs = dataset[0:3]
    expected = dataset[3]
    print("inputs: " + ', '.join(str(x) for x in inputs))
    print("weights: " + ', '.join(str(x) for x in weights))
    result = co(inputs)
    print("expected: " + str(expected))
    print("and got : " + str(result))
    correct[count] = True
    if result > expected:
        print("too big")
        correct[count] = False
        bump_weights(inputs, down)
    if result < expected:
        print("too small")
        correct[count] = False
        bump_weights(inputs, up)
    count += 1
    # If any pattern in this pass was misclassified, start another pass from the top.
    if count == len(datasets) and correct != [True, True, True, True]:
        count = 0
    print("\n")
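
# A minimal sanity-check sketch: the loop above only exits after a full pass with no
# weight updates, so re-running co() on each pattern with the learned weights should
# reproduce the expected output stored in the last element of each dataset.
print("final weights: " + ', '.join(str(x) for x in weights))
for dataset in datasets:
    print(str(dataset[0:3]) + " -> expected " + str(dataset[3]) + ", got " + str(co(dataset[0:3])))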