Perceptron Method

The perceptron method is one of the earliest and simplest artificial neural network supervised learning methods. It involves the single function H(w · i + b), where H is the Heaviside step function, i is the input, w is the weights vector, and b is the bias. For every input output pair (i, o), a scaled copy of i is added to w, and the scale factor itself is added to b. The scale factor is r(o – H(w · i + b)) for some small constant r referred to as the learning rate.
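Written as update rules, each pair (i, o) changes the parameters as follows:

w ← w + r(o – H(w · i + b)) i
b ← b + r(o – H(w · i + b))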

Here is sample Python perceptron method code:

#!/usr/bin/env python3

"""
Implements the perceptron method.

Usage:
        ./perceptron <data file> <data split> <learning rate> <number of epochs>

Data files must be space delimited with one input output pair per line.

initialization steps:
        Input output pairs are shuffled.
        Inputs             are min max normalized.
        Weights            are set to random values.

Requires NumPy.
"""

import numpy
import sys

def minmax(data):
        """
        Finds the min max normalizations of data.
        """

        return (data - numpy.min(data)) / (numpy.max(data) - numpy.min(data))

def init_data(data_file, data_split):
        """
        Creates the training and testing data.
        """

        data         = numpy.loadtxt(data_file)
        numpy.random.shuffle(data)
        data[:, :-1] = minmax(data[:, :-1])
        ones         = numpy.ones(data.shape[0])[None].T
        data         = numpy.hstack((data[:, :-1], ones, data[:, -1][None].T))
        data_split   = int((data_split / 100) * data.shape[0])

        return data[:data_split, :], data[data_split:, :]

def accuracy(data, weights):
        """
        Calculates the accuracies of models.
        """

        model_ = model(data[:, :-1], weights)

        return 100 * (model_ == data[:, -1]).astype(int).mean()

def model(inputs, weights):
        """
        Finds the model outputs.
        """

        return (numpy.matmul(inputs, weights) > 0).astype(int)

def learn(data, learn_rate, n_epochs):
        """
        Learns the weights from data.
        """

        weights = numpy.random.rand(data.shape[1] - 1) / (data.shape[1] - 1)
        for i in range(n_epochs):
                for e in data:
                        model_   = model(e[:-1], weights)
                        weights += learn_rate * (e[-1] - model_) * e[:-1]

        return weights

train_data, test_data = init_data(sys.argv[1], float(sys.argv[2]))
weights               = learn(train_data, float(sys.argv[3]), int(sys.argv[4]))
print(f"weights and bias:       {weights}")
print(f"training data accuracy: {accuracy(train_data, weights):.2f}%")
print(f"testing  data accuracy: {accuracy(test_data,  weights):.2f}%")

Here are sample results for a subset of the popular MNIST (Modified National Institute of Standards and Technology) dataset, available from many sources such as Kaggle. Outputs denote whether or not the inputs correspond to the digit eight:

% ./perceptron MNIST_subset_dataset.csv 80 0.000001 100
weights and bias:       [ 1.26322608e-03  1.08497202e-03  1.03465701e-03  6.20197534e-05
  8.92840895e-04  3.13696893e-04  9.32305752e-04  5.30571491e-04
  9.57601044e-04  9.92650699e-04  4.41735355e-04  9.50010528e-04
  7.11471738e-04  1.26831615e-03  7.15789174e-04  1.59426438e-04

...

  9.04247841e-04  7.11406621e-04  2.85485411e-04 -3.17756922e-05
  6.38906024e-04  9.42761704e-04  1.01108588e-03  3.51662937e-04
  8.18848025e-04  5.85304004e-04  1.77400185e-05  1.27172550e-03
 -1.72279550e-03]
training data accuracy: 90.04%
testing  data accuracy: 85.89%
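For reference, here is a minimal, hypothetical sketch of how such a data file might be prepared. The CSV layout (digit label in the first column, 784 pixel values after it, one header row) and the file names are assumptions about the Kaggle download rather than part of the method:

#!/usr/bin/env python3

"""
Hypothetical preprocessing sketch: converts a Kaggle style MNIST CSV
(digit label first, 784 pixel values after, one header row) into the
space delimited "is it an eight" format expected by the script above.
"""

import numpy

raw    = numpy.loadtxt("mnist_train.csv", delimiter=",", skiprows=1)
labels = (raw[:, 0] == 8).astype(int)               # 1 for eights, 0 otherwise
data   = numpy.hstack((raw[:, 1:], labels[None].T)) # inputs first, output last
numpy.savetxt("MNIST_subset_dataset.csv", data)     # space delimited by default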

Here is a plot of the accuracy versus the number of epochs for a data split of 80 / 20 and a learning rate of 0.000001. Blue denotes the training data accuracy and orange denotes the testing data accuracy.
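A minimal, hypothetical sketch of how such a plot might be generated follows; the use of matplotlib, the fixed seed, and rerunning learn once per epoch count are assumptions rather than the original setup. It assumes it is appended to the perceptron script above so that numpy, learn, accuracy, train_data, and test_data are in scope:

import matplotlib.pyplot

# Hypothetical sketch: rerun learn with an increasing number of epochs and
# record both accuracies (simple but quadratic in the number of epochs).
epochs    = range(1, 101)
train_acc = []
test_acc  = []
for n in epochs:
        numpy.random.seed(0)    # same initial weights on every run
        weights_ = learn(train_data, 0.000001, n)
        train_acc.append(accuracy(train_data, weights_))
        test_acc.append(accuracy(test_data,  weights_))
matplotlib.pyplot.plot(epochs, train_acc)    # blue by default
matplotlib.pyplot.plot(epochs, test_acc)     # orange by default
matplotlib.pyplot.xlabel("number of epochs")
matplotlib.pyplot.ylabel("accuracy (%)")
matplotlib.pyplot.show()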

K Nearest Neighbors Method

The k nearest neighbors method is one of the simplest supervised learning methods. To predict the output for a new input, it finds the k most similar inputs among the stored input output pairs and returns the most common of their outputs.
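In the code below, similarity is measured with squared Euclidean distance: for a new input i and a stored input x, the sum of the squares of the components of i – x. The prediction is then the mode of the outputs of the k closest stored pairs.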

Here is sample Python code to determine the accuracy of the k nearest neighbors method on data:

#!/usr/bin/env python3

"""
Determines the accuracy of the k nearest neighbors method on data.

Usage:
        ./k_nn <data file> <data split> <number of nearest neighbors>

Data files must be space delimited with one input output pair per line.

initialization steps:
        Input output pairs are shuffled.
        Inputs             are min max normalized.

Requires NumPy and SciPy (1.9 or later, for the keepdims argument to scipy.stats.mode).
"""

import scipy.stats
import numpy
import sys

def minmax(data):
        """
        Finds the min max normalizations of data.
        """

        return (data - numpy.min(data)) / (numpy.max(data) - numpy.min(data))

def init_data(data_file, data_split):
        """
        Creates the model and testing data.
        """

        data         = numpy.loadtxt(data_file)
        numpy.random.shuffle(data)
        data[:, :-1] = minmax(data[:, :-1])
        data_split   = int((data_split / 100) * data.shape[0])

        return data[:data_split, :], data[data_split:, :]

def accuracy(model_data, test_data, n_nn):
        """
        Calculates the accuracies of models.
        """

        model_ = model(test_data[:, :-1], model_data, n_nn)

        return 100 * (model_ == test_data[:, -1]).astype(int).mean()

def model_(input_, model_data, n_nn):
        """
        Finds the model output for a single input.
        """

        squares = (input_ - model_data[:, :-1]) ** 2
        indices = numpy.sum(squares, 1).argsort()[:n_nn]

        # keepdims=False makes scipy.stats.mode return a scalar (SciPy >= 1.9).
        return scipy.stats.mode(numpy.take(model_data[:, -1], indices),
                                keepdims=False).mode

def model(inputs, model_data, n_nn):
        """
        Finds the model outputs.
        """

        return numpy.apply_along_axis(lambda e : model_(e, model_data, n_nn),
                                      1,
                                      inputs)

model_data, test_data = init_data(sys.argv[1], float(sys.argv[2]))
n_nn                  = int(sys.argv[3])
print(f"testing data accuracy: {accuracy(model_data, test_data, n_nn):.2f}%")

Here are sample results for the popular Iris flower dataset, available from many sources such as scikit-learn:

% ./k_nn Iris_flower_dataset.csv 80 1
testing data accuracy: 96.67%

% ./k_nn Iris_flower_dataset.csv 80 2
testing data accuracy: 93.33%
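The drop from k = 1 to k = 2 is not surprising: with an even k the nearest neighbors can split their votes, and scipy.stats.mode breaks such ties by returning the smallest label, so odd values of k are usually preferable.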