Python Perceptron


import random


def heaviside_step(x):
    """Step activation: 0 for non-positive input, 1 otherwise."""
    return 0 if x <= 0 else 1

def model_success(data, a, b, c):
    """Evaluate the model by returning the fraction of
    correctly classified training samples."""
    successes = 0
    for (x_e, x_g, desired) in data:
        weighted_sum = a*x_e + b*x_g + c
        if heaviside_step(weighted_sum) == desired:
            successes += 1
    return successes / len(data)


if __name__ == "__main__":
    # Training data is stored in tuples in the form:
    # (x_e, x_g, desired_output)
    data = [
        (11, 83, 0), (18, 65, 0), (19, 37, 0), (14, 22, 0), (11, 12, 0), 
        (29, 10, 0), (35, 15, 0), (32, 27, 0), (43, 37, 0), (31, 50, 0),
        (10, 52, 0), (52, 14, 0), (33, 92, 1), (45, 84, 1), (60, 68, 1),
        (61, 85, 1), (85, 90, 1), (91, 79, 1), (86, 66, 1), (78, 59, 1),
        (77, 75, 1), (68, 53, 1), (78, 40, 1), (78, 22, 1), (93, 28, 1)
    ]

    # Initialise the model weights (a, b) and bias (c)
    a, b, c = 1, 1, 10
    # Iterate until every training sample is classified correctly.
    # This only terminates if the data is linearly separable.
    while model_success(data, a, b, c) < 1:
        # Randomly choose the next training sample
        x_e, x_g, desired = random.choice(data)
        weighted_sum = a*x_e + b*x_g + c
        # Perceptron learning rule (learning rate of 1): if the sample
        # is misclassified, shift the boundary towards the correct side.
        if heaviside_step(weighted_sum) > desired:
            a -= x_e
            b -= x_g
            c -= 1
        elif heaviside_step(weighted_sum) < desired:
            a += x_e
            b += x_g
            c += 1

    print("Perceptron trained")
    print(f"Decision boundary {a}x_e + {b}x_g + {c} = 0")