User:Ping/Python Perceptron

From Noisebridge
Revision as of 21:01, 18 March 2009

The code below defines two classes: Perceptron (which produces a floating-point output) and BooleanPerceptron (which produces a Boolean output). Internally, a bias input is built in by appending an extra 1 to the given inputs, so the last weight acts as the bias term.

<pre>#!/usr/bin/env python

__author__ = 'Ka-Ping Yee <ping@zesty.ca>'

def dot_product(inputs, weights):
    return sum(input*weight for input, weight in zip(inputs, weights))

class Perceptron:
    def __init__(self, size):
        """The 'size' parameter sets the number of inputs to this Perceptron."""
        self.weights = [0.0]*size + [0.0]

    def __repr__(self):
        """Display the internal weights of this Perceptron."""
        weights = ', '.join('%.3g' % weight for weight in self.weights)
        return '<%s: [%s]>' % (self.__class__.__name__, weights)

    def evaluate(self, inputs):
        """Evaluate this Perceptron with the given inputs, giving 0 or 1.
        'inputs' should be a list of numbers, and the length of the list
        should equal the 'size' used to construct this Perceptron."""
        return dot_product(self.weights, inputs + [1])

    def adjust(self, inputs, rate):
        """Adjust the weights of this Perceptron for the given inputs, using
        the given training rate."""
        for i, input in enumerate(inputs + [1]):
            self.weights[i] += rate*input

    def train(self, inputs, expected_output, rate):
        """Train this Perceptron for a single test case."""
        self.adjust(inputs, rate*(expected_output - self.evaluate(inputs)))

    def train_all(self, training_set, rate):
        """Train this Perceptron for all cases in the given training set."""
        for inputs, expected_output in training_set:
            self.train(inputs, expected_output, rate)

    def print_all(self, training_set):
        """Print out what the Perceptron produces for the given training set."""
        print self
        for inputs, expected_output in training_set:
            output = self.evaluate(inputs)
            print '    %r -> %r (want %r)' % (inputs, output, expected_output)
        print 'RMS error:', self.rms_error(training_set)
        print

    def rms_error(self, training_set):
        """Compute the root-mean-square error across all the training cases."""
        error = sum((self.evaluate(inputs) - expected_output)**2
                    for inputs, expected_output in training_set)
        return (float(error)/len(training_set))**0.5

class BooleanPerceptron(Perceptron):
    def evaluate(self, inputs):
        """Just like Perceptron.evaluate, but apply a threshold."""
        return int(Perceptron.evaluate(self, inputs) > 0)

def train_perceptron(perceptron, training_set,
                     initial_rate, minimum_rate, damping_factor,
                     error_threshold):
    rate = initial_rate
    while rate > minimum_rate:
        perceptron.print_all(training_set)
        if perceptron.rms_error(training_set) < error_threshold:
            print 'Success:', perceptron
            break
        perceptron.train_all(training_set, rate)
        rate *= damping_factor

# Train a Boolean Perceptron to be a three-input NAND gate.
training_set = [
    ([1, 0, 0], 1),
    ([1, 0, 1], 1),
    ([1, 1, 0], 1),
    ([1, 1, 1], 0),
    ([0, 1, 0], 1),
    ([0, 0, 1], 1),
    ([0, 1, 1], 1),
    ([0, 0, 0], 1),
]
train_perceptron(BooleanPerceptron(3), training_set, 0.1, 1e-9, 0.9999, 1e-3)

# Train a floating-point Perceptron to fit a straight line.
training_set = [
    ([1.0], 2.0),
    ([1.5], 3.0),
    ([2.0], 4.0),
]
train_perceptron(Perceptron(1), training_set, 0.1, 1e-9, 0.9999, 1e-3)
</pre>
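
A quick usage sketch (not part of the original listing; it assumes the classes and train_perceptron defined above, and the helper name nand_cases is made up here): once training finishes, the trained object can be queried directly.

<pre># Hypothetical usage sketch: train a BooleanPerceptron as a three-input NAND gate
# and query it.  Assumes Perceptron, BooleanPerceptron, and train_perceptron above.
nand_cases = [([a, b, c], int(not (a and b and c)))   # NAND truth table
              for a in (0, 1) for b in (0, 1) for c in (0, 1)]
nand = BooleanPerceptron(3)
train_perceptron(nand, nand_cases, 0.1, 1e-9, 0.9999, 1e-3)
print nand                      # the learned weights
print nand.evaluate([1, 1, 1])  # should print 0 once training has converged
print nand.evaluate([1, 0, 1])  # should print 1
</pre>

In the straight-line example, the one-input Perceptron's learned weights should end up close to [2, 0] (slope 2 and bias 0), since the training points lie exactly on output = 2*input.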