(* NOTE(review): removed stray Windows HTML-clipboard header
   ("Version:0.9 StartHTML:... EndFragment:...") that was accidentally
   pasted above the program and would break compilation. *)
program Perceptron1_2;
(* #sign:max: MAXBOX8: 14/05/2018 19:55:56
* implements a version of the algorithm set out at
* http://natureofcode.com/book/chapter-10-neural-networks/ ,
* but without graphics - ; classes assumed to be 1, -1
* http://www.rosettacode.org/wiki/Perceptron#Pascal
*)
(* three weights: x, y and the bias term *)
type TAofReal= array[0..2] of real;
(* forward declaration — feedForward is used by showOutput/train
   before its body appears further down the file *)
function feedForward(ins: array of integer; ws: array of real):
integer; forward;
function targetOutput( a, b: integer ): integer;
(* Ground truth the perceptron must learn: the line f(a) = 2a + 1.
   Returns 1 when b lies strictly above the line (positive class),
   -1 otherwise (negative class). *)
begin
  if b > a * 2 + 1 then
    result := 1
    //writeln('true ' +inttostr((a*2)+1)+' < '+inttostr(b))
  else
    result := -1;
    //writeln('false '+inttostr((a*2)+1)+' < '+inttostr(b))
end;
procedure showTargetOutput(var refval: string);
(* Print the 20x20 target classification grid — '#' for the positive
   class, 'O' for the negative class — and append the same characters
   to refval so the main program can later compare it with the
   perceptron's actual output.
   Fixed: missing ';' after write('#') was a compile error. *)
var x, y : integer;
begin
for y:= 10 downto -9 do
  begin
  for x:= -9 to 10 do
    if targetOutput( x,y )= 1 then begin
      write( '#' );           (* fixed: ';' was missing here *)
      refval:= refval +'#'
    end else begin
      write( 'O' );
      refval:= refval +'O'
    end;
  writeln('')
  end;
writeln('')
end;
procedure showOutput( ws : array of real; var resval: string );
(* Print the perceptron's current 20x20 classification grid using the
   supplied weights, and accumulate the characters into resval for a
   later accuracy comparison against the target grid.
   Fixed: missing ';' after writeln(tmpstr) was a compile error. *)
var inputs : array[0..2] of integer;
    x, y : integer;
    tmpstr: string;
begin
inputs[2]:= 1; (* bias *)
tmpstr:=''; resval:='';
for y:= 10 downto-9 do
  begin
  for x:=-9 to 10 do
    begin
    inputs[0]:= x;
    inputs[1]:= y;
    if feedForward( inputs, ws ) = 1 then
      tmpstr:= tmpstr+'#'
    else tmpstr:= tmpstr +'O';
    end;
  writeln(tmpstr);            (* fixed: ';' was missing here *)
  resval:= resval + tmpstr;
  tmpstr:='';
  end;
writeln('')
end;
procedure randomWeights( var ws : TAofReal );
(* Fill ws with uniform random starting weights in [-1, 1).
   ws is passed by reference so the caller's array is modified. *)
var idx : integer;
begin
randomize; (* seed the random-number generator once per call *)
for idx:= 0 to 2 do
  ws[idx]:= randomF * 2 - 1;  (* randomF in [0,1) -> weight in [-1,1) *)
end;
function feedForward(ins: array of integer; ws: array of real):
integer;
(* The perceptron fires: returns 1 if the sum of the inputs multiplied
   by their weights is positive, otherwise -1.
   Generalised: iterates over the open array's actual range via
   low/high instead of a hard-coded 0..2, so any matching
   input/weight length works; behaviour is unchanged for the
   3-element arrays used elsewhere in this file.
   Assumes ws has at least as many elements as ins. *)
var sum: real; i : integer;
begin
sum:= 0;
for i:= low(ins) to high(ins) do
  sum:= sum+ ins[i] * ws[i];
if sum > 0 then
  result:= 1
else result:= -1
end;
procedure train( var ws : TAofReal; runs : integer );
(* Apply the perceptron learning rule to every point of the 20x20
   grid, repeated 'runs' times.  The error (target minus actual
   output) drives a small correction of each weight.  ws is passed
   by reference so the adjusted weights are returned to the caller. *)
var cell : array[0..2] of integer;
    delta : real;
    col, row, pass, w : integer;
begin
cell[2]:= 1; (* bias input is always 1 *)
for pass:= 1 to runs do
  for row:= 10 downto -9 do
    for col:= -9 to 10 do
      begin
      cell[0]:= col;
      cell[1]:= row;
      delta:= targetOutput(col, row) - feedForward(cell, ws);
      {print(floattostr(delta)+': '+itoa(targetoutput(col,row))+
       ' '+itoa(feedforward( cell, ws))+'/ ') }
      (* nudge each weight towards the target;
         0.01 is the learning constant *)
      for w:= 0 to 2 do
        ws[w]:= ws[w] + delta * cell[w] * 0.01;
      end;
end;
procedure testAll(const ws : TAofReal; runs : integer );
(* Re-classify every grid point directly from the weights (summing
   the per-term rounded products) and print the resulting grid.
   Fixed: 'outputs' was read before it was ever assigned — the very
   first grid cell accumulated into an uninitialised variable; it is
   now zeroed before each cell's sum.  Also made the output calls
   consistent (write for both branches instead of print/write mix).
   NOTE(review): rounding each term and testing >= 1 can disagree
   with feedForward's unrounded sum > 0 test — kept as-is. *)
var inputs: array[0..2] of integer;
    x,y, i,j: integer;
    outputs: integer;
begin
inputs[2]:= 1; (* bias *)
for i:= 1 to runs do begin
  for y:= 10 downto -9 do begin
    for x:= -9 to 10 do begin
      inputs[0]:= x;
      inputs[1]:= y;
      outputs:= 0;  (* fixed: was uninitialised on the first cell *)
      for j:= 0 to 2 do begin
        outputs:= outputs+ round(ws[j]* inputs[j]);
      end;
      //print(itoa(outputs)+', ')
      if outputs >= 1 then write('#') else write('O');
    end;
    writeln('')
  end;
end;
end;
procedure Predict(const ws : TAofReal; a,b: integer);
(* Classify a single point (a, b) with the trained weights, printing
   the rounded weighted sum together with the true class label
   ('pos'/'neg') derived from the target function f(a) = 2a + 1.
   Fixed: 'outputs' was read before being assigned, and a missing
   ';' after println(...) was a compile error; the dead trailing
   'outputs := 0' store on the local was removed. *)
var inputs: array[0..2] of integer;
    outputs, j: integer;
    astr: string;
begin
inputs[2]:= 1; (* bias *)
inputs[0]:= a;
inputs[1]:= b;
outputs:= 0;  (* fixed: was uninitialised before accumulation *)
for j:= 0 to 2 do
  outputs:= outputs+ round(ws[j]* inputs[j]);
(* true class from the target line, for comparison with the sum *)
if ((a*2)+1) < b then astr:=('pos') else astr:=('neg');
println(itoa(outputs)+', '+astr);  (* fixed: ';' was missing here *)
end;
//var weights : array[0..2] of real;
var weights: TAofReal;
    refval, resval: string;
    p,q: integer;
    it: integer;  (* fixed: loop variable was used but never declared *)
begin //@main
(* Show the target, then the perceptron before and after training,
   print the weights at each stage, and finally test/predict values.
   Fixed: several statement separators (';') were missing between
   writeln/showOutput calls and the following statements, and
   randomWeights had stray double parentheses around its argument. *)
writeln( 'Target output for the function f(x) = 2x + 1:' );
showTargetOutput(refval);
randomWeights( weights );   (* fixed: was randomWeights(( weights) ) *)
writeln( 'Output from untrained perceptron:' );
showOutput( weights, resval );
for it:= 0 to 2 do print(floattostr(weights[it])+', ');
writeln('--------------------------------------');
train( weights, 1 );
writeln( 'Output from perceptron after 1 training run:' );
showOutput( weights, resval );
for it:= 0 to 2 do print(floattostr(weights[it])+', ');
writeln('--------------------------------------');
train( weights, 14 );
writeln( 'Output from perceptron after 14 training runs:' );
showOutput( weights, resval );  (* fixed: ';' was missing here *)
for it:= 0 to 2 do printF('weights %10.4f ',[weights[it]]);
writeln('--------------------------------------');
(* compare the accumulated target grid with the perceptron's grid *)
if strcompare(refval,resval)= 0 then
  writeln('accuracy ~100')
else
  writeln('accuracy NOT 100');
writeln('');
writeln('Now testing values-----------');
testAll( weights, 1 );
writeln('Predict values---------------');
Predict( weights, 15,5);
Writeln('');
writeln('Predict line values----------');
(* classify points along the diagonal b = a *)
for p:= -9 to 10 do
  //for q:= -9 to 10 do
  Predict(weights, p, p);
//writeln(resval)
//writeln(refval)
End.
ref: delta list
err(0,2)-targ(-1,1)-feed(-1,1)
-----------------------
0 : 1 1
0 :-1 -1
2 : 1 -1
-2 :-1 1
optimal weights by 4000 runs
weights -1.0114
weights 0.5006
weights -0.5560
--------------------------------------
ref:
https://www.academia.edu/36608990/TensorFlow_AI_Demo
https://www.scribd.com/document/378905755/tensorflow-machinelearning-task9
doc:
This is a text-based implementation, using a 20x20 grid (just like the original Mark 1 Perceptron had).
The rate of improvement drops quite markedly as you increase the number of training runs.
A perceptron is an algorithm used in machine-learning. It's the simplest of all neural networks, consisting of only one neuron,
and is typically used for pattern recognition.
A perceptron attempts to separate input into a positive and a negative class with the aid of a linear function.
The inputs are each multiplied by weights, random weights at first, and then summed.
Based on the sign of the sum a decision is made.
In order for the perceptron to make the right decision, it needs to train with input for which the correct outcome is known,
so that the weights can slowly be adjusted until they start producing the desired results.
----app_template_loaded_code----
----File newtemplate.txt not exists - now saved!----