User:Ping/Python Perceptron


Revision as of 21:15, 18 March 2009

The code below defines two classes: Perceptron (which produces a floating-point output) and BooleanPerceptron (which produces a Boolean output). Internally, a bias input is built in by appending an extra 1 to the given inputs, so each Perceptron stores one more weight than it has inputs.
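For example, a Perceptron constructed with size 2 stores three weights, and the appended 1 is what the last weight multiplies. A minimal sketch using the Perceptron class defined below (the weight values are arbitrary, chosen only for illustration):

p = Perceptron(2)
p.weights = [0.5, -0.25, 1.0]   # two input weights plus the bias weight
print p.evaluate([1.0, 2.0])    # 0.5*1.0 + (-0.25)*2.0 + 1.0*1 = 1.0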

#!/usr/bin/env python

__author__ = 'Ka-Ping Yee <ping@zesty.ca>'

def dot_product(inputs, weights):
    """Compute the dot product of the two given lists of numbers."""
    return sum(input*weight for input, weight in zip(inputs, weights))

class Perceptron:
    def __init__(self, size):
        """The 'size' parameter sets the number of inputs to this Perceptron."""
        self.weights = [0.0]*size + [0.0]  # one weight per input, plus one for the bias input

    def __repr__(self):
        """Display the internal weights of this Perceptron."""
        return '<%s: %r>' % (self.__class__.__name__, self.weights)

    def evaluate(self, inputs):
        """Evaluate this Perceptron with the given inputs, giving 0 or 1.
        'inputs' should be a list of numbers, and the length of the list
        should equal the 'size' used to construct this Perceptron."""
        return dot_product(self.weights, inputs + [1])

    def train(self, inputs, expected_output, rate):
        """Train this Perceptron for a single test case, using the given
        learning rate."""
        error = self.evaluate(inputs) - expected_output
        for i, input in enumerate(inputs + [1]):
            self.weights[i] -= input*error*rate

    def train_all(self, training_set, rate):
        """Train this Perceptron for all cases in the given training set."""
        for inputs, expected_output in training_set:
            self.train(inputs, expected_output, rate)

    def print_all(self, training_set):
        """Show how this Perceptron performs on the given training set."""
        print self
        for inputs, expected_output in training_set:
            output = self.evaluate(inputs)
            print '    %r -> %r (want %r)' % (inputs, output, expected_output)
        print 'RMS error:', self.rms_error(training_set)
        print

    def rms_error(self, training_set):
        """Compute the root-mean-square error across all the training cases."""
        error = sum((self.evaluate(inputs) - expected_output)**2
                    for inputs, expected_output in training_set)
        return (float(error)/len(training_set))**0.5

class BooleanPerceptron(Perceptron):
    def evaluate(self, inputs):
        """Just like Perceptron.evaluate, but apply a threshold."""
        return int(Perceptron.evaluate(self, inputs) > 0)

def train_perceptron(perceptron, training_set,
                     initial_rate, minimum_rate, damping_factor,
                     error_threshold):
    """Train the given Perceptron repeatedly against all the cases in the
    training set, using the given initial learning rate and multiplying it
    by the damping factor each time.  Training stops when the RMS error drops
    below the error threshold or the learning rate reaches the minimum."""
    rate = initial_rate
    while rate > minimum_rate:
        perceptron.print_all(training_set)
        if perceptron.rms_error(training_set) <= error_threshold:
            print 'Success:', perceptron
            break
        perceptron.train_all(training_set, rate)
        rate *= damping_factor

# Train a Boolean Perceptron to be a three-input NAND gate.
training_set = [
    ([1, 0, 0], 1),
    ([1, 0, 1], 1),
    ([1, 1, 0], 1),
    ([1, 1, 1], 0),
    ([0, 1, 0], 1),
    ([0, 0, 1], 1),
    ([0, 1, 1], 1),
    ([0, 0, 0], 1),
]
train_perceptron(BooleanPerceptron(3), training_set, 0.1, 0, 1, 0)

# Train a floating-point Perceptron to fit a straight line.
training_set = [
    ([1.0], 2.0),
    ([1.5], 3.0),
    ([2.0], 4.0),
]
train_perceptron(Perceptron(1), training_set, 0.1, 1e-9, 0.9999, 1e-6)
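
For a concrete sense of the update rule in train, here is one step worked by hand, a sketch assuming the classes above are already defined:

p = Perceptron(1)           # weights start as [0.0, 0.0]
p.train([1.0], 2.0, 0.1)    # error = 0.0 - 2.0 = -2.0, so each weight -= input*error*rate
print p.weights             # both weights have moved from 0.0 to 0.2
print p.evaluate([1.0])     # 0.2*1.0 + 0.2*1 = 0.4, closer to the expected 2.0

In train_perceptron, the learning rate is multiplied by the damping factor after every pass over the training set, so the line-fitting run above makes at most about log(1e-9/0.1)/log(0.9999), roughly 184,000 passes before the rate falls below the minimum, unless the RMS error drops below 1e-6 first.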