User:Ping/Python Perceptron

From Noisebridge

Latest revision as of 22:54, 18 March 2009

The code below defines two classes: Perceptron (which produces a floating-point output) and BooleanPerceptron (which produces a Boolean output). Internally, the Perceptron builds in a bias input by appending an extra 1 to the given inputs.
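
Before the full listing, here is a quick illustration of that bias trick (this snippet is not part of the original page): a Perceptron of size 2 holds three weights, and evaluating it on inputs [x1, x2] computes w1*x1 + w2*x2 + b, because the appended 1 is paired with the last weight b.

<pre>
# Illustration only: the extra 1 appended to the inputs pairs with the last
# weight, so that weight acts as a constant bias term.
weights = [0.5, -0.25, 2.0]      # [w1, w2, bias]
inputs = [3.0, 4.0]
value = sum(w*x for w, x in zip(weights, inputs + [1]))
print value                      # 0.5*3.0 + (-0.25)*4.0 + 2.0 = 2.5
</pre>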

<pre>
#!/usr/bin/env python

__author__ = 'Ka-Ping Yee <ping@zesty.ca>'

def dot_product(inputs, weights):
    """Compute the dot product of the two given lists of numbers."""
    return sum(input*weight for input, weight in zip(inputs, weights))

def mean(numbers):
    """Compute the mean of a list of numbers."""
    return float(sum(numbers))/len(numbers)

class Perceptron:
    def __init__(self, size):
        """The 'size' parameter sets the number of inputs to this Perceptron."""
        self.weights = [0]*size + [0]

    def __repr__(self):
        """Produce a string showing the internal weights of this Perceptron."""
        return '<%s: %r>' % (self.__class__.__name__, self.weights)

    def evaluate(self, inputs):
        """Evaluate this Perceptron with the given inputs to give a float.
        'inputs' should be a list of numbers, and the length of the list
        should equal the 'size' used to construct this Perceptron."""
        return dot_product(self.weights, inputs + [1])

    def train(self, inputs, expected_output, rate):
        """Train this Perceptron for a single test case, using the given
        learning rate."""
        error = self.evaluate(inputs) - expected_output
        for i, input in enumerate(inputs + [1]):
            self.weights[i] -= input*error*rate

    def train_all(self, training_set, rate):
        """Train this Perceptron for all cases in the given training set."""
        for inputs, expected_output in training_set:
            self.train(inputs, expected_output, rate)

    def print_all(self, training_set):
        """Display how this Perceptron performs on the given training set."""
        print self
        for inputs, expected_output in training_set:
            output = self.evaluate(inputs)
            print '    %r -> %r (want %r)' % (inputs, output, expected_output)
        print 'RMS error:', self.rms_error(training_set)
        print

    def rms_error(self, training_set):
        """Compute the root-mean-square error across all the training cases."""
        sq_errors = [(self.evaluate(inputs) - expected_output)**2
                     for inputs, expected_output in training_set]
        return mean(sq_errors)**0.5

class BooleanPerceptron(Perceptron):
    def evaluate(self, inputs):
        """Evaluate this Perceptron with the given inputs, giving 0 or 1.
        This just applies a threshold to the result of Perceptron.evaluate."""
        return int(Perceptron.evaluate(self, inputs) >= 0)

def train_perceptron(perceptron, training_set,
                     initial_rate, minimum_rate, damping_factor,
                     error_threshold):
    """Train the given Perceptron repeatedly against all the cases in the
    training set, using the given initial learning rate and multiplying it
    by the damping factor each time.  Training stops when the RMS error drops
    below the error threshold or the learning rate reaches the minimum."""
    rate = initial_rate
    while rate > minimum_rate:
        perceptron.print_all(training_set)
        if perceptron.rms_error(training_set) <= error_threshold:
            print 'Success:', perceptron
            break
        perceptron.train_all(training_set, rate)
        rate *= damping_factor

# Train a Boolean Perceptron to be a three-input NAND gate.
training_set = [
    ([1, 0, 0], 1),
    ([1, 0, 1], 1),
    ([1, 1, 0], 1),
    ([1, 1, 1], 0),
    ([0, 1, 0], 1),
    ([0, 0, 1], 1),
    ([0, 1, 1], 1),
    ([0, 0, 0], 1),
]
train_perceptron(BooleanPerceptron(3), training_set, 0.1, 0, 1, 0)

# Train a floating-point Perceptron to fit a straight line with slope 2.
training_set = [
    ([1.0], 2.0),
    ([1.5], 3.0),
    ([2.0], 4.0),
]
train_perceptron(Perceptron(1), training_set, 0.1, 1e-9, 0.9999, 1e-6)

# Train a floating-point Perceptron to fit a straight line with slope -3.
training_set = [
    ([-1.0], 6.0),
    ([1.5], -1.5),
    ([2.0], -3.0),
]
train_perceptron(Perceptron(1), training_set, 0.1, 1e-9, 0.9999, 1e-6)
</pre>
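
As a minimal usage sketch (not part of the original page), the classes above can also be trained without the per-pass printout that train_perceptron produces; the train_quietly helper below is made up for this example.

<pre>
# Hypothetical helper: run train_all for a fixed number of passes without
# printing anything, then return the trained Perceptron.
def train_quietly(perceptron, training_set, rate, passes):
    for _ in range(passes):
        perceptron.train_all(training_set, rate)
    return perceptron

nand_cases = [
    ([1, 1, 1], 0), ([1, 1, 0], 1), ([1, 0, 1], 1), ([0, 1, 1], 1),
    ([1, 0, 0], 1), ([0, 1, 0], 1), ([0, 0, 1], 1), ([0, 0, 0], 1),
]
nand = train_quietly(BooleanPerceptron(3), nand_cases, 0.1, 1000)

print nand                       # shows the learned weights
print nand.evaluate([1, 1, 1])   # should be 0 after training
print nand.evaluate([1, 0, 1])   # should be 1 after training
</pre>

Because NAND is linearly separable, the simple perceptron update rule settles on a correct weight vector after a modest number of passes, so the fixed pass count here is generous.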