User:Ping/Python Perceptron

From Noisebridge

Revision as of 20:15, 18 March 2009

This Perceptron builds in a bias input (by internally appending an extra 1 to the inputs) and damps the learning rate on every pass, so training terminates even on a set it cannot learn.
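
To see what the extra 1 buys, here is a standalone illustration (the weights and inputs below are made up, not taken from the script): computing the weighted sum with the appended 1 is the same as taking the ordinary dot product and adding the last weight as an explicit bias, so the perceptron effectively learns its own threshold.

<pre># Illustration only: the bias trick, with made-up numbers.
weights = [2.0, -1.0, 3.0, -4.0]   # three input weights plus one bias weight
inputs = [1, 0, 1]
with_bias_input = sum(w*x for w, x in zip(weights, inputs + [1]))
explicit_bias = sum(w*x for w, x in zip(weights[:-1], inputs)) + weights[-1]
print with_bias_input, explicit_bias   # both are 1.0
</pre>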

#!/usr/bin/env python

__author__ = 'Ka-Ping Yee <ping@zesty.ca>'

def dot_product(inputs, weights):
    return sum(input*weight for input, weight in zip(inputs, weights))

class Perceptron:
    def __init__(self, size):
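        # One weight per input, plus a trailing weight for the appended bias input of 1.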
        self.weights = [0.0]*size + [0.0]
        self.threshold = 0.0

    def __repr__(self):
        weights = '[%s]' % (', '.join('%.3g' % w for w in self.weights))
        return '<weights=%s, threshold=%r>' % (weights, self.threshold)

    def evaluate(self, inputs):
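        # Append the constant bias input 1, then return 1 if the weighted sum exceeds the threshold, else 0.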
        return int(dot_product(self.weights, inputs + [1]) > self.threshold)

    def adjust(self, inputs, delta):
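        # Perceptron learning rule: move each weight (including the bias weight) by delta times its input.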
        for i, input in enumerate(inputs + [1]):
            self.weights[i] += delta*input

def train(perceptron, inputs, expected_output, delta):
    output = perceptron.evaluate(inputs)
    perceptron.adjust(inputs, delta*(expected_output - output))

def train_set(perceptron, training_set, delta):
    for inputs, expected_output in training_set:
        train(perceptron, inputs, expected_output, delta)

def check_set(perceptron, training_set):
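    # Print the current state and every case; return True only if all cases are classified correctly.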
    print perceptron
    failures = 0
    for inputs, expected_output in training_set:
        output = perceptron.evaluate(inputs)
        print '    %r -> %r (should be %r)' % (inputs, output, expected_output)
        if output != expected_output:
            failures += 1
    return not failures

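# Training data over three binary inputs: the target is 0 only for [1, 1, 1] (a 3-input NAND).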
training_set = [
    ([1, 0, 0], 1),
    ([1, 0, 1], 1),
    ([1, 1, 0], 1),
    ([1, 1, 1], 0),
    ([0, 1, 0], 1),
    ([0, 0, 1], 1),
    ([0, 1, 1], 1),
    ([0, 0, 0], 1),
]

perceptron = Perceptron(3)
delta = 0.1
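# Damp the learning rate each pass so the loop always terminates, even if training never succeeds.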
while delta > 1e-9:
    if check_set(perceptron, training_set):
        print
        print 'Success:', perceptron
        break
    train_set(perceptron, training_set, delta)
    delta *= 0.99
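
As a follow-up usage example (assuming the script above has just run in the same interpreter session and training succeeded), the trained perceptron can be queried on individual cases:

<pre># Continuing from the script above.
print perceptron.evaluate([0, 1, 0])   # 1 if training succeeded
print perceptron.evaluate([1, 1, 1])   # 0 if training succeeded
</pre>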