User:Ping/Python Perceptron

From Noisebridge
< User:Ping (Difference between revisions)
Jump to: navigation, search
Line 17: Line 17:
 
     def __init__(self, size):
 
     def __init__(self, size):
 
         """The 'size' parameter sets the number of inputs to this Perceptron."""
 
         """The 'size' parameter sets the number of inputs to this Perceptron."""
         self.weights = [0.0]*size + [0.0]
+
         self.weights = [0]*size + [0]
  
 
     def __repr__(self):
 
     def __repr__(self):
Line 59: Line 59:
 
     def evaluate(self, inputs):
 
     def evaluate(self, inputs):
 
         """Just like Perceptron.evaluate, but apply a threshold."""
 
         """Just like Perceptron.evaluate, but apply a threshold."""
         return int(Perceptron.evaluate(self, inputs) > 0)
+
         return int(Perceptron.evaluate(self, inputs) >= 0)
  
 
def train_perceptron(perceptron, training_set,
 
def train_perceptron(perceptron, training_set,

Revision as of 22:52, 18 March 2009

The code below defines two classes: Perceptron (which produces a floating-point output) and BooleanPerceptron (which produces a Boolean output). Internally, the Perceptron builds in a bias input by appending an extra 1 to the given inputs.

#!/usr/bin/env python

__author__ = 'Ka-Ping Yee <ping@zesty.ca>'

def dot_product(inputs, weights):
    """Return the dot product of the two given lists of numbers.

    Pairs are formed positionally; if the lists differ in length, the
    extra entries of the longer list are ignored (zip semantics)."""
    total = 0
    for a, b in zip(inputs, weights):
        total += a * b
    return total

def mean(numbers):
    """Return the arithmetic mean of a non-empty list of numbers.

    The result is always a float, even for all-integer input."""
    total = float(sum(numbers))
    count = len(numbers)
    return total / count

class Perceptron:
    """A single perceptron with a built-in bias weight.

    The weight list holds one weight per input plus a trailing bias
    weight; evaluate() appends a constant 1 to the inputs so the bias
    weight always participates in the dot product."""

    def __init__(self, size):
        """The 'size' parameter sets the number of inputs to this Perceptron."""
        # size input weights plus one bias weight, all zero.  Floats from
        # the start, so the repr looks the same before and after training.
        self.weights = [0.0] * (size + 1)

    def __repr__(self):
        """Produce a string showing the internal weights of this Perceptron."""
        return '<%s: %r>' % (self.__class__.__name__, self.weights)

    def evaluate(self, inputs):
        """Evaluate this Perceptron with the given inputs, giving a float.

        'inputs' should be a list of numbers, and the length of the list
        should equal the 'size' used to construct this Perceptron."""
        # The appended 1 pairs with the trailing bias weight.
        return dot_product(self.weights, inputs + [1])

    def train(self, inputs, expected_output, rate):
        """Train this Perceptron for a single test case, using the given
        learning rate (the classic perceptron delta rule: each weight moves
        against the error, scaled by its input and the rate)."""
        error = self.evaluate(inputs) - expected_output
        for i, input in enumerate(inputs + [1]):
            self.weights[i] -= input*error*rate

    def train_all(self, training_set, rate):
        """Train this Perceptron for all cases in the given training set.

        'training_set' is a list of (inputs, expected_output) pairs."""
        for inputs, expected_output in training_set:
            self.train(inputs, expected_output, rate)

    def print_all(self, training_set):
        """Display how this Perceptron performs on the given training set."""
        print(self)
        for inputs, expected_output in training_set:
            output = self.evaluate(inputs)
            print('    %r -> %r (want %r)' % (inputs, output, expected_output))
        print('RMS error: %s' % self.rms_error(training_set))
        print('')

    def rms_error(self, training_set):
        """Compute the root-mean-square error across all the training cases."""
        sq_errors = [(self.evaluate(inputs) - expected_output)**2
                     for inputs, expected_output in training_set]
        return mean(sq_errors)**0.5

class BooleanPerceptron(Perceptron):
    """A Perceptron whose output is thresholded to the integers 0 and 1."""

    def evaluate(self, inputs):
        """Just like Perceptron.evaluate, but threshold the result at zero:
        a non-negative raw value yields 1, a negative one yields 0."""
        raw = Perceptron.evaluate(self, inputs)
        return 1 if raw >= 0 else 0

def train_perceptron(perceptron, training_set,
                     initial_rate, minimum_rate, damping_factor,
                     error_threshold):
    """Train the given Perceptron repeatedly against all the cases in the
    training set, using the given initial learning rate and multiplying it
    by the damping factor each time.  Training stops when the RMS error drops
    below the error threshold or the learning rate reaches the minimum.

    The current state is printed before each pass; a success message is
    printed only on the error-threshold exit, not the rate-floor exit."""
    rate = initial_rate
    while rate > minimum_rate:
        # Show the state reached so far before deciding whether to stop.
        perceptron.print_all(training_set)
        if perceptron.rms_error(training_set) <= error_threshold:
            print('Success: %s' % perceptron)
            break
        perceptron.train_all(training_set, rate)
        rate *= damping_factor

# Train a Boolean Perceptron to be a three-input NAND gate.
# Each case is (inputs, expected_output): the output is 0 only when all
# three inputs are 1.  NOTE(review): the case order is part of the
# training schedule (weight updates are order-dependent), so it must be
# preserved as-is.
training_set = [
    ([1, 0, 0], 1),
    ([1, 0, 1], 1),
    ([1, 1, 0], 1),
    ([1, 1, 1], 0),
    ([0, 1, 0], 1),
    ([0, 0, 1], 1),
    ([0, 1, 1], 1),
    ([0, 0, 0], 1),
]
# minimum_rate=0 and damping_factor=1 mean the rate never decays, so
# training runs until the RMS error reaches the threshold of 0 exactly.
train_perceptron(BooleanPerceptron(3), training_set, 0.1, 0, 1, 0)

# Train a floating-point Perceptron to fit a straight line with slope 2.
# Cases lie on y = 2x, so the ideal weights are [2.0, 0.0] (zero bias).
training_set = [
    ([1.0], 2.0),
    ([1.5], 3.0),
    ([2.0], 4.0),
]
# Rate decays by factor 0.9999 per pass; stops at RMS error <= 1e-6 or
# when the rate falls to 1e-9.
train_perceptron(Perceptron(1), training_set, 0.1, 1e-9, 0.9999, 1e-6)

# Train a floating-point Perceptron to fit a straight line with slope -3.
# Cases lie on y = -3x + 3, so a nonzero bias weight must be learned.
training_set = [
    ([-1.0], 6.0),
    ([1.5], -1.5),
    ([2.0], -3.0),
]
train_perceptron(Perceptron(1), training_set, 0.1, 1e-9, 0.9999, 1e-6)
Personal tools