#! /usr/bin/env python3
#
def perceptron_test():
    """perceptron_test() tests the perceptron algorithm.

    Runs the two demonstration problems: an artificial linearly-separable
    data set, and the generator ratings data set.

    Licensing:
        This code is distributed under the MIT license.
    Modified:
        24 July 2022
    Author:
        John Burkardt
    """
    import numpy as np
    import platform

    print('')
    print('perceptron_test():')
    print(' python version: ' + platform.python_version())
    print(' numpy version: ' + np.version.version)
    print(' Use the perceptron algorithm to determine a classifier')
    print(' for sets of data.')

    dividing_line_test()
    generator_ratings_test()
    #
    #  Terminate.
    #
    print('')
    print('perceptron_test():')
    print(' Normal end of execution.')
    return


def perceptron_fit(data, z, alpha, step_num):
    """Run the perceptron learning sweeps until convergence or step limit.

    This loop was previously duplicated inline in dividing_line_test()
    and generator_ratings_test(); it is factored out here.

    Input:
        data (n, m) ndarray: samples by rows; column 0 is the bias term 1.
        z (n,) ndarray: class labels, 0 or 1 (float or int).
        alpha float: learning rate.
        step_num int: maximum number of sweeps through the data.

    Output:
        w (m,) ndarray: weight vector, normalized to unit length.
        e int: misclassification count on the final sweep (0 on success).
        step int: number of sweeps actually performed.
    """
    import numpy as np

    n, m = data.shape
    w = np.ones(m) / m
    e = 1
    step = 0
    while e != 0 and step < step_num:
        e = 0
        step = step + 1
        for i in range(n):
            #  Current classification: 1 if the weighted sum is positive.
            f = (0 < np.dot(data[i, :], w))
            e = e + (z[i] != f)
            #  Nudge w toward the correct label, then renormalize so the
            #  weight vector stays on the unit sphere.
            w = w + alpha * data[i, :] * (z[i] - f)
            w = w / np.linalg.norm(w)
    return w, e, step


def dividing_line_test():
    """dividing_line_test() analyzes the dividing_line data.

    Generates 25 random points, labels them by the line y = 3x + 2, and
    searches for a linear classifier with the perceptron algorithm.

    Licensing:
        This code is distributed under the MIT license.
    Modified:
        24 July 2022
    Author:
        John Burkardt
    """
    from numpy.random import default_rng
    import matplotlib.pyplot as plt
    import numpy as np

    rng = default_rng()

    print('')
    print('dividing_line_test():')
    print(' Apply the perceptron to an artificial data set.')
    print(' Points (x,y) are in set 0 if y < 3x + 2')
    print(' Points (x,y) are in set 1 if y > 3x + 2')
    print(' Generate 25 points at random and search for a classifier')
    print(' using the perceptron algorithm')
    #
    #  Generate the training data.
    #
    train_num = 25
    print('')
    print(' Number of training data values = ', train_num)
    x = rng.random(train_num)
    y = 3.0 * rng.random(train_num) + 2.0
    z = (y > 3.0 * x + 2).astype(int)
    index = np.arange(0, train_num)
    #  Renamed from 'l' (single-letter name easily confused with '1').
    table = np.array([index, x, y, z])
    print('')
    print(' Training data:')
    print(table.T)
    #
    #  Display the training data
    #
    x_min = np.min(x)
    x_max = np.max(x)
    y_min = np.min(y)
    y_max = np.max(y)

    good = (z == 1)
    bad = (z == 0)
    #  BUG FIX: len() of a boolean mask returns train_num, not the number
    #  of True entries; count the True entries instead.
    n_good = np.sum(good)
    n_bad = np.sum(bad)

    bad_x_mean = np.mean(x[bad])
    bad_y_mean = np.mean(y[bad])
    good_x_mean = np.mean(x[good])
    good_y_mean = np.mean(y[good])

    print(' %d good y, and %d bad y' % (n_good, n_bad))
    print(' %g <= X <= %g' % (x_min, x_max))
    print(' %g <= Y <= %g' % (y_min, y_max))
    print(' GOOD: mean X = %g, mean Y = %g' % (good_x_mean, good_y_mean))
    print(' BAD: mean X = %g, mean Y = %g' % (bad_x_mean, bad_y_mean))

    plt.plot(x[good], y[good], 'b+')
    plt.plot(x[bad], y[bad], 'ro')
    plt.xlabel('<-- X -->')
    plt.ylabel('<-- Y -->')
    plt.title('y > 3x+2')
    plt.grid(True)
    filename = 'dividing_line_data.png'
    plt.savefig(filename)
    plt.show(block=False)
    print(' Graphics saved as "%s"' % (filename))
    plt.close()
    #
    #  Perceptron algorithm.
    #
    alpha = 0.05
    print(' Using learning rate alpha =', alpha)
    step_num = 1000
    print(' Using ', step_num, 'steps')

    data = np.zeros([train_num, 3])
    data[:, 0] = 1.0
    data[:, 1] = np.copy(x)
    data[:, 2] = np.copy(y)

    w, e, step = perceptron_fit(data, z, alpha, step_num)

    if e == 0:
        print(' All training data classified on step', step)
    else:
        print(' Iteration terminated without convergence on step', step)
        print(' e =', e)
    #
    #  Report.
    #
    print('')
    print(' Perceptron weights:')
    print(' f(x) = %g + %g * x + %g * y' % (w[0], w[1], w[2]))
    print('')
    #  NOTE(review): the classification table and classifier plot below were
    #  reconstructed; the original text was lost (truncated at '(0<...').
    #  Confirm against the original perceptron_test.py if available.
    print(' Index x*w (0<x*w) z')
    for i in range(train_num):
        xw = np.dot(data[i, :], w)
        print(' %4d %10g %d %d' % (i, xw, int(0 < xw), z[i]))

    plt.plot(x[good], y[good], 'b+')
    plt.plot(x[bad], y[bad], 'ro')
    #  Draw the computed dividing line w0 + w1*x + w2*y = 0.
    x_line = np.array([x_min, x_max])
    y_line = -(w[0] + w[1] * x_line) / w[2]
    plt.plot(x_line, y_line, 'k-')
    plt.xlabel('<-- X -->')
    plt.ylabel('<-- Y -->')
    plt.title('y > 3x+2 classifier')
    plt.grid(True)
    filename = 'dividing_line_classified.png'
    plt.savefig(filename)
    plt.show(block=False)
    print(' Graphics saved as "%s"' % (filename))
    plt.close()

    return


def generator_ratings_test():
    """generator_ratings_test() analyzes the generator ratings data.

    Reads index/rpm/vibration/rating records from 'generators.txt' and
    searches for a linear classifier with the perceptron algorithm.

    Licensing:
        This code is distributed under the MIT license.
    Modified:
        24 July 2022
    Author:
        John Burkardt
    """
    import matplotlib.pyplot as plt
    import numpy as np

    filename = 'generators.txt'

    print('')
    print('generator_ratings_test():')
    print(' Apply the perceptron to generator ratings data.')
    print(' Data file is "', filename, '"')
    print(' Data is index, rpm, vibration, rating')
    print(' Rating is 0 for bad, 1 for good.')
    print(' Use the perceptron algorithm to determine a')
    print(' classifier f(rpm,vibration) -> {0, 1}')
    #
    #  Read the data file.
    #
    print('')
    print(' Generator ratings')
    data = np.loadtxt(filename)
    #
    #  Copy columns of data into variables.
    #
    rpm = data[:, 1]
    vib = data[:, 2]
    grade = data[:, 3]
    #
    #  Count the number of cases.
    #
    n = len(rpm)
    print('')
    print(' Number of generators = %d' % (n))
    #
    #  Part 1: Display all data
    #
    rpm_min = np.min(rpm)
    rpm_max = np.max(rpm)
    vib_min = np.min(vib)
    vib_max = np.max(vib)

    good = (grade == +1.0)
    bad = (grade == 0.0)
    #  BUG FIX: len() of a boolean mask returns n, not the number of True
    #  entries; count the True entries instead.
    n_good = np.sum(good)
    n_bad = np.sum(bad)

    bad_rpm_mean = np.mean(rpm[bad])
    bad_vib_mean = np.mean(vib[bad])
    good_rpm_mean = np.mean(rpm[good])
    good_vib_mean = np.mean(vib[good])

    print(' %d good generators, and %d bad generators' % (n_good, n_bad))
    print(' %g <= RPM <= %g' % (rpm_min, rpm_max))
    print(' %g <= VIB <= %g' % (vib_min, vib_max))
    print(' GOOD: mean RPM = %g, mean VIB = %g' % (good_rpm_mean, good_vib_mean))
    print(' BAD: mean RPM = %g, mean VIB = %g' % (bad_rpm_mean, bad_vib_mean))

    plt.plot(rpm[good], vib[good], 'b+', rpm[bad], vib[bad], 'ro')
    plt.xlabel('<-- RPM -->')
    plt.ylabel('<-- VIB -->')
    plt.title('Generator ratings')
    plt.grid(True)
    plt.axis('equal')
    filename = 'generator_ratings_data.png'
    plt.savefig(filename)
    plt.show(block=False)
    print(' Graphics saved as "%s"' % (filename))
    plt.close()
    #
    #  Part 2, find a classifier for the data.
    #
    #  Work with normalized data.
    #
    r = (rpm - np.min(rpm)) / (np.max(rpm) - np.min(rpm))
    v = (vib - np.min(vib)) / (np.max(vib) - np.min(vib))
    #
    #  Perceptron algorithm.
    #
    alpha = 0.01

    x = np.zeros([n, 3])
    x[:, 0] = 1.0
    x[:, 1] = np.copy(r)
    x[:, 2] = np.copy(v)

    w, e, step = perceptron_fit(x, grade, alpha, 100)

    if e == 0:
        print(' All training data classified on step %d' % (step))
    else:
        print(' Iteration terminated without convergence on step %d' % (step))
    #
    #  Report.
    #
    print('')
    print(' Perceptron weights:')
    print(' f(x) = %g + %g * r + %g * v' % (w[0], w[1], w[2]))
    print('')
    print('')
    #  NOTE(review): the classification table and classifier plot below were
    #  reconstructed; the original text was lost (truncated at '(0<...').
    #  Confirm against the original perceptron_test.py if available.
    print(' Index x*w (0<x*w) grade')
    for i in range(n):
        xw = np.dot(x[i, :], w)
        print(' %4d %10g %d %d' % (i, xw, int(0 < xw), grade[i]))

    plt.plot(rpm[good], vib[good], 'b+', rpm[bad], vib[bad], 'ro')
    #  Map the dividing line w0 + w1*r + w2*v = 0 from normalized (r,v)
    #  coordinates back to physical (rpm,vib) coordinates.
    r_line = np.linspace(0.0, 1.0, 101)
    v_line = -(w[0] + w[1] * r_line) / w[2]
    plt.plot(rpm_min + r_line * (rpm_max - rpm_min),
             vib_min + v_line * (vib_max - vib_min), 'k-')
    plt.xlabel('<-- RPM -->')
    plt.ylabel('<-- VIB -->')
    plt.title('Generator ratings classifier')
    plt.grid(True)
    plt.axis('equal')
    filename = 'generator_ratings_classified.png'
    plt.savefig(filename)
    plt.show(block=False)
    print(' Graphics saved as "%s"' % (filename))
    plt.close()

    return


def timestamp():
    """timestamp() prints the date as a timestamp.

    Licensing:
        This code is distributed under the MIT license.
    Modified:
        06 April 2013
    Author:
        John Burkardt
    """
    import time

    t = time.time()
    print(time.ctime(t))
    return None


if __name__ == '__main__':
    timestamp()
    perceptron_test()
    timestamp()