;;; -*- Mode: Lisp; Package: User -*-
;;; ***************************************************************************
;;; Common Lisp implementation of Cascade-Correlation learning algorithm.
;;; This version for export.  Non-portable user-interface stuff excised.
;;; 
;;; Written by:   Scott E. Fahlman
;;;               School of Computer Science
;;;               Carnegie-Mellon University
;;;               Pittsburgh, PA 15213
;;;
;;;               Phone: (412) 268-2575
;;;               Internet: fahlman@cs.cmu.edu
;;;
;;; This code has been placed in the public domain by the author.  As a
;;; matter of simple courtesy, anyone using or adapting this code is
;;; expected to acknowledge the source.  The author would like to hear
;;; about any attempts to use this system, successful or not.
;;;
;;; For an explanation of this algorithm and some results, see "The
;;; Cascade-Correlation Learning Architecture" by Scott E. Fahlman and
;;; Christian Lebiere in D. S. Touretzky (ed.), "Advances in Neural
;;; Information Processing Systems 2", Morgan Kaufmann, 1990.  A somewhat
;;; longer version is available as CMU Computer Science Tech Report
;;; CMU-CS-90-100.
;;;
;;; ***************************************************************************
;;; EDIT HISTORY SINCE FIRST RELEASE:
;;;
;;; 8/24/90:
;;; Modified TEST-EPOCH so that it wouldn't mess up error statistics being
;;; passed from the output-training to input-training phase.  Thanks to
;;; Scott Crowder for spotting this.
;;; 
;;; 6/1/90:
;;; Fixed bug in INSTALL-NEW-UNIT.  New unit's initial weight was being
;;; computed using *CAND-COR* values of the successful candidate, which is
;;; already zero.  Now uses *CAND-PREV-COR*.  Thanks to Tim Howells for
;;; spotting this.
;;;
;;; Modified BUILD-NET to check that *MAX-UNITS* is large enough.  A couple of
;;; people got mysterious failures the first time they used lots of inputs.
;;;
;;; Made a small change in QUICKPROP-UPDATE to prevent rare divide-by-zero
;;; errors when p = s = 0.0.  Thanks to Tom Dietterich.
;;; 
;;; Added CHANGED-TRAINING-SET, which should be called when the training set
;;; is changed but you don't want to reinitialize the net.  This rebuilds
;;; the caches.
;;; 
;;; 11/9/90:
;;; Added some additional type declarations for maximum speed under certain
;;; Common Lisp compilers.
;;; ***************************************************************************
;;;
(in-package "USER")

;;; This proclamation buys a certain amount of overall speed at the expense
;;; of runtime checking.  It takes effect only when :DECLARE-UNSAFE is on
;;; *FEATURES*; leave that feature off when debugging new, bug-infested code.
;;;
#+declare-unsafe
(proclaim '(optimize (speed 3) (space 0) (safety 0)))

;;; Style note: Because some of these runs take a long time, this code is
;;; extensively hacked for good performance under a couple of Common Lisp
;;; systems, some of which have poor performance on multi-dimensional
;;; arrays and some of which have weak type-inference in the compiler.
;;; Elegance and clarity have in some cases been sacrificed for speed.

;;; In some problems, floating point underflow errors may occur as a result
;;; of weight-decay and other operations.  Most Common Lisp implementations
;;; have an option to turn floating underflows into zero values without
;;; signalling an error.  You should enable this facility if it is
;;; available.  If not, you'll either have to write a condition handler for
;;; floating underflows or live with the occasional underflow error.

;;; In CMU Common Lisp, we use the following incantation:
;;;     (setq extensions:*ignore-floating-point-underflow* t)
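;;;
;;; In other Lisps, a minimal sketch of such a handler (assuming only that
;;; the implementation signals the standard FLOATING-POINT-UNDERFLOW
;;; condition, and that flushing the offending result to zero is
;;; acceptable) could wrap individual float operations:
;;;
;;;     (defmacro underflow-to-zero (form)
;;;       `(handler-case ,form
;;;          (floating-point-underflow () 0.0)))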


;;; Compensate for the clumsy Common Lisp declaration system and weak
;;; type-inference in some Common Lisp compilers.

;;; INCF-SF, *SF, etc. are like INCF, *, etc., but they declare their
;;; operands and results to be short-floats.  The code gets unreadable
;;; quickly if you insert all these declarations by hand.

(defmacro incf-sf (place &optional (increment 1.0))
  `(the short-float (incf (the short-float ,place)
			  (the short-float ,increment))))

(defmacro decf-sf (place &optional (increment 1.0))
  `(the short-float (decf (the short-float ,place)
			  (the short-float ,increment))))

(defmacro *sf (&rest args)
  `(the short-float
	(* ,@(mapcar #'(lambda (x) (list 'the 'short-float x)) args))))

(defmacro +sf (&rest args)
  `(the short-float
	(+ ,@(mapcar #'(lambda (x) (list 'the 'short-float x)) args))))

(defmacro -sf (&rest args)
  `(the short-float
	(- ,@(mapcar #'(lambda (x) (list 'the 'short-float x)) args))))

(defmacro /sf (&rest args)
  `(the short-float
	(/ ,@(mapcar #'(lambda (x) (list 'the 'short-float x)) args))))
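
;;; For example, (incf-sf sum x) expands into
;;;     (the short-float (incf (the short-float sum) (the short-float x)))
;;; which gives the compiler enough declarations to keep the arithmetic in
;;; unboxed short-floats.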

;;; DOTIMES1 is like DOTIMES, only with the loop counter declared as a
;;; fixnum.  This is for compilers with weak type inference.

(defmacro dotimes1 (form1 &body body)
  `(dotimes ,form1 (declare (fixnum ,(car form1))) . ,body))

;;; Create vector-access forms similar to SVREF, but for vectors of
;;; element-type SHORT-FLOAT and FIXNUM.

(eval-when (compile load eval)
  (defconstant fvector-type
    (array-element-type (make-array '(1) :element-type 'short-float)))
  (defconstant ivector-type
    (array-element-type (make-array '(1) :element-type 'fixnum))))

(defmacro fvref (a i)
  "Like SVREF, but with vectors of element-type SHORT-FLOAT."
  (if (eq fvector-type t)
    `(the short-float (svref ,a ,i))
    `(the short-float
	  (aref (the (simple-array ,fvector-type (*)) ,a) ,i))))

(defmacro ivref (a i)
  "Like SVREF, but with vectors of element-type FIXNUM."
  (if (eq ivector-type t)
    `(the fixnum (svref ,a ,i))
    `(the fixnum
	  (aref (the (simple-array ,ivector-type (*)) ,a) ,i))))


;;;; Assorted Parameters and Controls.

;;; These parameters and switches control the quickprop learning algorithm
;;; used to train the output weights and candidate units.

(defvar *unit-type* :sigmoid
  "The type of activation function used by the hidden units.  Options
  currently implemented are :sigmoid, :asigmoid, and :gaussian.  Sigmoid is
  symmetric in range -0.5 to +0.5, while Asigmoid is asymmetric, 0.0 to
  1.0.")

(defvar *output-type* :sigmoid
  "The activation function to use on the output units.  Options currently
  implemented are :linear and :sigmoid.")

(defvar *raw-error* nil
  "If T, candidate units will try to correlate with the raw difference
  between actual and desired outputs.  Else, they use the difference modified
  by the derivative of the output unit activation function.")

(defvar *sigmoid-prime-offset* 0.1
  "This is added to the derivative of the sigmoid function to prevent the
  system from getting stuck at the points where sigmoid-prime goes to
  zero.")
(proclaim '(short-float *sigmoid-prime-offset*))

(defvar *weight-range* 1.0
  "Input weights in the network get inital random values between plus and
  minus *weight-range*.  This parameter also controls the initial weights
  on direct input-to-output links.")
(proclaim '(short-float *weight-range*))

(defvar *weight-multiplier* 1.0
  "The output weights for cadidate units get an initial value that is the
  negative of the correlation times this factor.")
(proclaim '(short-float *weight-multiplier*))

(defvar *output-mu* 2.0
  "Mu parmater used for quickprop training of output weights.  The
  step size is limited to mu times the previous step.")
(proclaim '(short-float *output-mu*))

(defvar *output-shrink-factor* (/ *output-mu* (+ 1.0 *output-mu*))
  "Derived from *output-mu*.  Used in computing whether the proposed step is
  too large.")
(proclaim '(short-float *output-shrink-factor*))

(defvar *output-epsilon* 0.35
  "Controls the amount of linear gradient descent to use in updating
  output weights.")
(proclaim '(short-float *output-epsilon*))

(defvar *output-decay* 0.0001
  "This factor times the current weight is added to the slope at the
  start of each output-training epoch.  Keeps weights from growing too big.")
(proclaim '(short-float *output-decay*))

(defvar *output-patience* 8
  "If we go for this many epochs with no significant change, it's time to
  stop tuning.  If 0, go on forever.")
(proclaim '(fixnum *output-patience*))

(defvar *output-change-threshold* 0.01
  "The error must change by at least this fraction of its old value in
  order to count as a significant change.")
(proclaim '(short-float *output-change-threshold*))

(defvar *input-mu* 2.0
  "Mu parmater used for quickprop training of input weights.  The
  step size is limited to mu times the previous step.")
(proclaim '(short-float *input-mu*))

(defvar *input-shrink-factor* (/ *input-mu* (+ 1.0 *input-mu*))
  "Derived from *input-mu*.  Used in computing whether the proposed step is
  too large.")
(proclaim '(short-float *input-shrink-factor*))

(defvar *input-epsilon* 1.0
  "Controls the amount of linear gradient descent to use in updating
  unit input weights.")
(proclaim '(short-float *input-epsilon*))

(defvar *input-decay* 0.0
  "This factor times the current weight is added to the slope at the
  start of each output-training epoch.  Keeps weights from growing too big.")
(proclaim '(short-float *input-decay*))

(defvar *input-patience* 8
  "If we go for this many epochs with no significant change, it's time to
  stop tuning.  If 0, go on forever.")
(proclaim '(fixnum *input-patience*))

(defvar *input-change-threshold* 0.03
  "The correlation score for the best unit must change by at least
  this fraction of its old value in order to count as a significant
  change.")
(proclaim '(short-float *input-change-threshold*))

;;; Variables related to error and correlation.

(defvar *score-threshold* 0.4
  "An output is counted as correct for a given case if the difference
  between that output and the desired value is smaller in magnitude than
  this value.")
(proclaim '(short-float *score-threshold*))

(defvar *error-bits* 0
  "Count number of bits in epoch that are wrong by more than
  *SCORE-THRESHOLD*")
(proclaim '(fixnum *error-bits*))

(defvar *true-error* 0.0
  "The sum-squared error at the network outputs.  This is the value the
  algorithm is ultimately trying to minimize.")
(proclaim '(short-float *true-error*))

(defvar *sum-error* 0.0
  "Accumulate the sum of the error values after output training phase.")
(proclaim '(short-float *sum-error*))

(defvar *sum-sq-error* 0.0
  "Accumulate the sum of the squared error values after output
  training phase.")
(proclaim '(short-float *sum-sq-error*))

(defvar *avg-error* 0.0
  "Holds the average of error values after output training phase.")
(proclaim '(short-float *avg-error*))

(defvar *best-candidate-score* 0.0
  "The best correlation score found among all candidate units being
  trained.")
(proclaim '(short-float *best-candidate-score*))

(defvar *best-candidate* 0
  "The index of the candidate unit whose correlation score is best
  at present.")
(proclaim '(fixnum *best-candidate*))

;;; These variables and switches control the simulation and display.

(defvar *use-cache* t
  "If T, cache the forward-pass values instead of repeatedly
  computing them.  This can save a *lot* of time if all the cached values
  fit into memory.")

(defparameter *epoch* 0
  "Count of the number of times the entire training set has been presented.")
(proclaim '(fixnum *epoch*))

(defvar *test* nil
  "If T, run a test epoch every so often during output training.")

(defvar *test-interval* 0
  "Run a test epoch every *test-interval* output training cycles.")
(proclaim '(fixnum *test-interval*))

(defvar *single-pass* nil
  "When on, pause after next forward/backward cycle.")

(defvar *single-epoch* nil
  "When on, pause after next training epoch.")

(defparameter *step* nil
  "Turned briefly to T in order to continue after a pause.")

;;; The sets of training inputs and outputs are stored in parallel vectors.
;;; Each element is a SIMPLE-VECTOR holding short-float values, one for
;;; each input or output.  Note: this is a simple vector, not a specialized
;;; vector of element-type short-float.
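;;;
;;; For example, a hypothetical two-input XOR training set (with targets in
;;; the -0.5 to +0.5 range of the default :sigmoid outputs, and assuming the
;;; float literals read as short-floats) might be set up as:
;;;
;;;     (setq *training-inputs*
;;;           (vector (vector 0.0 0.0) (vector 0.0 1.0)
;;;                   (vector 1.0 0.0) (vector 1.0 1.0)))
;;;     (setq *training-outputs*
;;;           (vector (vector -0.5) (vector 0.5)
;;;                   (vector 0.5) (vector -0.5)))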

(defvar *training-inputs* (make-array 0)
  "Vector of input patterns for training the net.")
(proclaim '(simple-vector *training-inputs*))

(defvar *training-outputs* (make-array 0)
  "Vector of output patterns for training the net.")
(proclaim '(simple-vector *training-outputs*))

(defvar *goal* (make-array 0)
  "The goal vector for the current training or testing case.")
(proclaim '(simple-vector *goal*))

(defvar *max-cases* 0
  "Maximum number of training cases that can be accommdated by the current
  data structures.")
(proclaim '(fixnum *max-cases*))

(defvar *ncases* 0
  "Number of training cases currently in use.  Assume a contiguous block
  beginning with *FIRST-CASE*.")
(proclaim '(fixnum *ncases*))

(defvar *first-case* 0
  "Address of the first training case in the currently active set.  Usually
  zero, but may differ if we are training on different chunks of the training
  set at different times.")
(proclaim '(fixnum *first-case*))

;;; For some benchmarks there is a separate set of values used for testing
;;; the network's ability to generalize.  These values are not used during
;;; training.

(defvar *test-inputs* '#()
  "Vector of input patterns for testing the net.")
(proclaim '(simple-vector *test-inputs*))

(defvar *test-outputs* '#()
  "Vector of output patterns for testing the net.")
(proclaim '(simple-vector *test-outputs*))


;;;; Fundamental data structures.

;;; Unit values and weights are short flonums.

;;; Instead of representing each unit by a structure, we represent the
;;; unit by a fixnum.  This is used to index into various vectors that hold
;;; per-unit information, such as the activation value of each unit.
;;; So the information concerning a given unit is found in a slice of values
;;; across many vectors, all with the same unit-index.

;;; Per-connection information for each connection COMING INTO unit is
;;; stored in a vector of vectors.  The outer vector is indexed by the unit
;;; number, and the inner vector is then indexed by connection number.
;;; This is a sleazy way of implementing a 2-D array, faster in most Lisp
;;; systems than multiplying to do the index arithmetic, and more efficient
;;; if the units are sparsely connected.
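;;;
;;; For example, the unit feeding the K-th incoming connection of unit J is
;;; (ivref (svref *connections* j) k), and the weight on that connection is
;;; (fvref (svref *weights* j) k).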

;;; Unit 0, the "bias unit", is always at a maximum-on value.  Next come
;;; some input "units", then some hidden units.

;;; Output units have their own separate set of data structures and
;;; indices.  The units and outputs together form the "active" network.
;;; There are also separate data structures and indices for the "candidate"
;;; units that have not yet been added to the network.

(defvar *max-units* 30
  "Maximum number of input values and hidden units in the network.")
(proclaim '(fixnum *max-units*))

(defvar *ninputs* 0
  "Number of inputs for this problem.")
(proclaim '(fixnum *ninputs*))

(defvar *noutputs* 0
  "Number of outputs for this problem.")
(proclaim '(fixnum *noutputs*))

(defvar *nunits* 0
  "Current number of active units in the network.  This count includes all
  inputs to the network and the bias unit.")
(proclaim '(fixnum *nunits*))

(defvar *ncandidates* 8
  "Number of candidate units whose inputs will be trained at once.")
(proclaim '(fixnum *ncandidates*))

;;; The following vectors hold values related to hidden units in the active
;;; net and their input weights.  The vectors are created by BUILD-NET, after
;;; the dimension variables have been set up.

(defvar *values* nil
  "Vector holding the current activation value for each unit and input in
  the active net.")

(defvar *values-cache* nil
  "Holds a distinct *VALUES* vector for each of the *MAX-CASES* training
  cases.  Once we have computed the *VALUES* vector for each training case,
  we can use it repeatedly until the weights or training cases change.")

(defvar *extra-values* nil
  "Extra values vector to use when not using the cache.")

;;; Note: the *NCONNECTIONS* and *CONNECTIONS* vectors could be eliminated
;;; if we wanted to commit to total connectivity for all units.
;;; For now, we want to allow for sparse or irregular connectivity.

(defvar *nconnections* nil
  "Vector holding the number of incoming connections for each unit.")

(defvar *connections* nil
  "Vector that holds a connection vector for each unit J.
  Each entry in the connection vector holds a unit index I,
  indicating that this connection is from I to J.")

(defvar *weights* nil
  "Vector of vectors with structure parallel to the *connections* vector.
  Each entry gives the weight associated with an incoming connection.")

;;; The following vectors hold values for the outputs of the active
;;; network and the output-side weights.

(defvar *outputs* nil
  "Vector holding the network output values.")

(defvar *errors* nil
  "Vector holding the current error value for each output.")

(defvar *errors-cache* nil
  "Holds a distinct *ERRORS* vector for each of the *MAX-CASES* training
  cases.  Once we have computed the *ERRORS* vector for a given training
  case, we can use it repeatedly until the weights or the training cases
  change.")

(defvar *extra-errors* nil
  "Extra errors vector to use when not using the cache.")

(defvar *output-weights* nil
  "Vector of vectors.  For each output, we have a vector of output weights
  coming from the unit indicated by the index.")

(defvar *output-deltas* nil
  "Vector of vectors, parallel with output weights.  Each entry is the
  amount by which the corresponding output weight was changed last time.")

(defvar *output-slopes* nil
  "Vector of vectors, parallel with output weights.  Each entry is the
  partial derivative of the total error with respect to the corresponding
  weight.")

(defvar *output-prev-slopes* nil
  "Vector of vectors, parallel with output weights.  Each entry is the
  previous value of the corresponding *OUTPUT-SLOPES* entry.")

(defvar *output-weights-record* nil
  "The vector of output weights is recorded here after each output-training
  phase and just prior to the addition of the next unit.  This record
  allows us to reconstruct the network's performance at each of these
  points in time.")

;;; The following vectors have one entry for each candidate unit in the
;;; pool of trainees.

(defvar *cand-sum-values* nil
  "For each candidate unit, the sum of its values over an entire
  training set.")

(defvar *cand-cor* nil
  "A vector with one entry for each candidate unit.  This entry is a vector
  that holds the correlation between this unit's value and the residual
  error at each of the outputs, computed over a whole epoch.")

(defvar *cand-prev-cor* nil
  "Holds the *cand-cor* values computed in the previous candidate training
  epoch.")

(defvar *cand-weights* nil
  "A vector with one entry for each candidate unit.  This entry is a vector
  that holds the current input weights for that candidate unit.")

(defvar *cand-deltas* nil
  "A vector with one entry for each candidate unit.  This entry is a vector
  that holds the input weights deltas for that candidate unit.")

(defvar *cand-slopes* nil
  "A vector with one entry for each candidate unit.  This entry is a vector
  that holds the input weights slopes for that candidate unit.")

(defvar *cand-prev-slopes* nil
  "A vector with one entry for each candidate unit.  This entry is a vector
  that holds the previous values of the input weight slopes for that
  candidate unit.")

;;; At present, each candidate receives a connection from every input and
;;; pre-existing unit.  Rather than cons up a new *connections* vector for
;;; each of these, we can just use this one for all of them.

(defvar *all-connections* nil
  "A *CONNECTIONS* vector that can be used by any unit that connects to
  all lower-numbered units, in order.")


;;;; Network-building utilities.

(defun build-net (ninputs noutputs)
  "Create the network data structures, given the number of input and output
  connections.  Get *MAX-UNITS* and other dimensions from variables."
  (declare (fixnum ninputs noutputs))
  ;; Check to make sure *MAX-UNITS* is big enough.
  (unless (> *max-units* (+ ninputs 1))
    (error "*MAX-UNITS* must be greater than number of inputs plus 1."))
  ;; Fill in assorted variables and create top-level vectors.
  (setq *ninputs* ninputs
	*noutputs* noutputs
	*max-cases* (length *training-inputs*)
	*ncases* *max-cases*
	*first-case* 0
	*nunits* (+ 1 *ninputs*)
	*values-cache* (make-array *max-cases* :initial-element nil)
	*extra-values* (make-array *max-units*
				   :element-type 'short-float
				   :initial-element 0.0)
	*values* *extra-values*
	*nconnections* (make-array *max-units*
				   :element-type 'fixnum
				   :initial-element 0)
	*connections* (make-array *max-units* :initial-element nil)
	*weights* (make-array *max-units* :initial-element nil)
	*outputs* (make-array *noutputs*
			      :element-type 'short-float
			      :initial-element 0.0)
	*errors-cache* (make-array *max-cases* :initial-element nil)
	*extra-errors* 	(make-array *noutputs*
				    :element-type 'short-float
				    :initial-element 0.0)
	*errors* *extra-errors*
	*output-weights* (make-array *noutputs* :initial-element nil)
	*output-weights-record* (make-array *max-units* :initial-element nil)
	*output-deltas* (make-array *noutputs* :initial-element nil)
	*output-slopes* (make-array *noutputs* :initial-element nil)
	*output-prev-slopes* (make-array *noutputs* :initial-element nil)
	*cand-sum-values* (make-array *ncandidates*
				      :element-type 'short-float
				      :initial-element 0.0)
	*cand-cor* (make-array *ncandidates* :initial-element nil)
	*cand-prev-cor* (make-array *ncandidates* :initial-element nil)
	*cand-weights* (make-array *ncandidates* :initial-element nil)
	*cand-deltas* (make-array *ncandidates* :initial-element nil)
	*cand-slopes* (make-array *ncandidates* :initial-element nil)
	*cand-prev-slopes* (make-array *ncandidates* :initial-element nil))
  ;; Only create the caches if *USE-CACHE* is on -- may not always have room.
  (when *use-cache*
    (dotimes1 (i *max-cases*)
      (setf (svref *values-cache* i)
	    (make-array *max-units*
			:element-type 'short-float
			:initial-element 0.0))
      (setf (svref *errors-cache* i)
	    (make-array *noutputs*
			:element-type 'short-float
			:initial-element 0.0))))
  ;; For each output, create the vectors holding per-weight information.
  (dotimes1 (i *noutputs*)
    (setf (svref *output-weights* i)
	  (make-array *max-units*
		      :element-type 'short-float
		      :initial-element 0.0))
    (setf (svref *output-deltas* i)    
	  (make-array *max-units*
		      :element-type 'short-float
		      :initial-element 0.0))
    (setf (svref *output-slopes* i)
	  (make-array *max-units*
		      :element-type 'short-float
		      :initial-element 0.0))
    (setf (svref *output-prev-slopes* i)
	  (make-array *max-units*
		      :element-type 'short-float
		      :initial-element 0.0)))
  ;; For each candidate unit, create the vectors holding the correlations,
  ;; incoming weights, and other stats.
  (dotimes1 (i *ncandidates*)
    (setf (svref *cand-cor* i)
	  (make-array *noutputs*
		      :element-type 'short-float
		      :initial-element 0.0))
    (setf (svref *cand-prev-cor* i)
	  (make-array *noutputs*
		      :element-type 'short-float
		      :initial-element 0.0))
    (setf (svref *cand-weights* i)
	  (make-array *max-units*
		      :element-type 'short-float
		      :initial-element 0.0))
    (setf (svref *cand-deltas* i)
	  (make-array *max-units*
		      :element-type 'short-float
		      :initial-element 0.0))
    (setf (svref *cand-slopes* i)
	  (make-array *max-units*
		      :element-type 'short-float
		      :initial-element 0.0))
    (setf (svref *cand-prev-slopes* i)
	  (make-array *max-units*
		      :element-type 'short-float
		      :initial-element 0.0))))

(defun random-weight ()
  "Select a random weight, uniformly distributed over the
  interval from minus to plus *weight-range*."
  (-sf (random (*sf 2.0 *weight-range*)) *weight-range*))

(defun init-net ()
  "Set up the network for a learning problem.  Clean up all the data
  structures that may have become corrupted.  Initialize the output weights
  to random values controlled by *weight-range*."
  ;; Set up the *ALL-CONNECTIONS* vector.
  (setq *all-connections*
	(make-array *max-units* :element-type 'fixnum))
  (dotimes1 (i *max-units*)
    (setf (ivref *all-connections* i) i))
  ;; Initialize the active unit data structures.
  (dotimes1 (i *max-units*)
    (setf (fvref *extra-values* i) 0.0)
    (setf (ivref *nconnections* i) 0)
    (setf (svref *connections* i) nil)
    (setf (svref *weights* i) nil)
    (setf (svref *output-weights-record* i) nil))
  ;; Initialize the per-output data structures.
  (dotimes1 (i *noutputs*)
    (setf (fvref *outputs* i) 0.0)
    (setf (fvref *extra-errors* i) 0.0)
    (let ((ow (svref *output-weights* i))
	  (od (svref *output-deltas* i))
	  (os (svref *output-slopes* i))
	  (op (svref *output-prev-slopes* i)))
      (dotimes1 (j *max-units*)
	(setf (fvref ow j) 0.0)
	(setf (fvref od j) 0.0)
	(setf (fvref os j) 0.0)
	(setf (fvref op j) 0.0))
      ;; Set up initial random weights for the input-to-output connections.
      (dotimes1 (j (1+ *ninputs*))
	(setf (fvref ow j) (random-weight)))))
  ;; Initialize the caches if they are in use.
  (when *use-cache*
    (dotimes1 (j *max-cases*)
      (let ((v (svref *values-cache* j))
	    (e (svref *errors-cache* j)))
	(dotimes1 (i *max-units*)
	  (setf (fvref v i) 0.0))
	(dotimes1 (i *noutputs*)
	  (setf (fvref e i) 0.0)))))	
  ;; Candidate units get initialized in a separate routine.
  (init-candidates)
  ;; Do some other assorted housekeeping.
  (setf (fvref *extra-values* 0) 1.0)
  (setq *epoch* 0)
  (setq *nunits* (+ 1 *ninputs*))
  (setq *error-bits* 0)
  (setq *true-error* 0.0)
  (setq *sum-error* 0.0)
  (setq *sum-sq-error* 0.0)
  (setq *best-candidate-score* 0.0)
  (setq *best-candidate* 0))

(defun changed-training-set ()
  "Call this instead of BUILD-NET and INIT-NET if you want to leave
  existing hidden units in place and start from here with new training
  examples.  Assumes that the number of net inputs and outputs remains the
  same, but the number of cases may have changed.  Rebuilds the caches."
  (setq *max-cases* (length *training-inputs*)
	*ncases* *max-cases*
	*first-case* 0
	*values-cache* (make-array *max-cases* :initial-element nil)
	*errors-cache* (make-array *max-cases* :initial-element nil))
  ;; Only create the caches if *USE-CACHE* is on -- may not always have room.
  (when *use-cache*
    (dotimes1 (i *max-cases*)
      (setf (svref *errors-cache* i)
	    (make-array *noutputs*
			:element-type 'short-float
			:initial-element 0.0))
      (setq *values* (make-array *max-units*
				 :element-type 'short-float
				 :initial-element 0.0))
      (setf (svref *values-cache* i) *values*)
      (set-up-inputs (svref *training-inputs* i))
      (do ((j (1+ *ninputs*) (1+ j)))
	  ((= j *nunits*))
	(declare (fixnum j))
	(compute-unit-value j)))))

;;;; Utilities for learning.

(proclaim '(inline activation activation-prime))

(defun activation (sum)
  "Given the sum of weighted inputs, compute the unit's activation value.
  Defined unit types are :sigmoid, :asigmoid, and :gaussian."
  (declare (short-float sum))
  (ecase *unit-type*
    (:sigmoid
     ;; Symmetric sigmoid function in range -0.5 to +0.5.
     (cond ((< sum -15.0) -0.5)
	   ((> sum 15.0) +0.5)
	   (t (-sf (/sf (+sf 1.0 (exp (-sf sum)))) 0.5))))
    (:asigmoid
     ;; Asymmetric sigmoid in range 0.0 to 1.0.
     (cond ((< sum -15.0) 0.0)
	   ((> sum 15.0) 1.0)
	   (t (/sf 1.0 (+sf 1.0 (exp (-sf sum)))))))
    (:gaussian
     ;; Gaussian activation function in range 0.0 to 1.0.
     (let ((x (*sf -0.5 sum sum)))
       (if (< x -75.0) 0.0 (exp x))))))

;;; Note: do not use *SIGMOID-PRIME-OFFSET* here, as it confuses the
;;; correlation machinery.  But do use it in output-prime, since it does no
;;; harm there and the output units often get stuck at extreme values.

(defun activation-prime (value sum)
  "Given the unit's activation value and sum of weighted inputs, compute
  the derivative of the activation with respect to the sum.  Defined unit
  types are :sigmoid, :asigmoid, and :gaussian."
  (declare (short-float value sum))
  (ecase *unit-type*
    (:sigmoid
     (-sf 0.25 (*sf value value)))
    (:asigmoid
     (*sf value (-sf 1.0 value)))
    (:gaussian
     (*sf (-sf value) sum))))
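
;;; For the symmetric :sigmoid above, v = 1/(1 + exp(-s)) - 0.5, so
;;; dv/ds = (0.5 + v)(0.5 - v) = 0.25 - v*v.  For :gaussian,
;;; v = exp(-0.5 * s * s), so dv/ds = -v * s.  These are exactly the
;;; expressions computed by ACTIVATION-PRIME.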

(proclaim '(inline output-function output-prime))

(defun output-function (sum)
  "Compute the value of an output, given the weighted sum of incoming values.
  Defined output types are :sigmoid and :linear."
  (declare (short-float sum))
  (ecase *output-type*
    (:sigmoid (cond ((< sum -15.0) -0.5)
		    ((> sum 15.0) +0.5)
		    (t (-sf (/sf 1.0 (+sf 1.0 (exp (-sf sum)))) 0.5))))
    (:linear sum)))

(defun output-prime (output)
  "Compute the derivative of an output with respect to the weighted sum of
  incoming values.  Defined output types are :sigmoid and :linear."
  (declare (short-float output))
  (ecase *output-type*
    (:sigmoid 
     (+sf *sigmoid-prime-offset* (-sf 0.25 (*sf output output))))
    (:linear 1.0)))

;;; The basic routine for doing Quickprop-style update of weights.
;;; Distilled essence of a year's work...
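;;;
;;; The quadratic step below comes from fitting a parabola through the two
;;; most recent slope measurements: next-step = d * s / (p - s), where d is
;;; the previous weight delta, s the current slope (with decay added in),
;;; and p the previous slope.  Mu caps the step at mu times the previous
;;; step.  A plain gradient term (epsilon * s) is added when the current
;;; slope still calls for motion in the same direction as the last step, or
;;; used alone when the last step was zero.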

(proclaim '(inline quickprop-update))

(defun quickprop-update (i weights deltas slopes prevs
			   epsilon decay mu shrink-factor)
  "Given vectors holding weights, deltas, slopes, and previous slopes,
  and an index i, update weight(i) and delta(i) appropriately.  Move
  slope(i) to prev(i) and zero out slope(i).  Add weight decay term to
  each slope before doing the update."
  (let* ((w (fvref weights i))
	 (d (fvref deltas i))
	 (s (+sf (fvref slopes i) (*sf decay w)))
	 (p (fvref prevs i))
	 (next-step 0.0))
    (declare (short-float w p s d next-step)) 
    ;; The step must always be downhill.
    (cond
     ;; If last step was negative...
     ((minusp d)
      ;; First, add in linear term if current slope is still positive.
      (when (plusp s)
	(decf-sf next-step (*sf epsilon s)))
      (cond
       ;; If current slope is close to or larger than prev slope...
       ((>= s (*sf shrink-factor p))
	;; Take maximum size negative step.
	(incf-sf next-step (*sf mu d)))
       ;; Else, use quadratic estimate.
       (t (incf-sf next-step (*sf d (/sf s (-sf p s)))))))
     ;; If last step was positive...
     ((plusp d)
      ;; First, add in linear term if current slope is still negative.
      (when (minusp s)
	(decf-sf next-step (*sf epsilon s)))
      (cond
       ;; If current slope is close to or more neg than prev slope...
       ((<= s (*sf shrink-factor p))
	;; Take maximum size positive step.
	(incf-sf next-step (*sf mu d)))
       ;; Else, use quadratic estimate.
       (t (incf-sf next-step (*sf d (/sf s (-sf p s)))))))
     ;; Last step was zero, so use only linear term.
     (t (decf-sf next-step (*sf epsilon s))))
    ;; Having computed the next step, update the data vectors.
    (setf (fvref deltas i) next-step)
    (setf (fvref weights i) (+sf w next-step))
    (setf (fvref prevs i) s)
    (setf (fvref slopes i) 0.0)
    nil))


;;;; Machinery for training output weights.

(defun set-up-inputs (input)
  "Set up all the inputs from the INPUT vector as the first few entries in
  in the values vector."
  (declare (simple-vector input))
  (setf (fvref *values* 0) 1.0)
  (dotimes1 (i *ninputs*)
    (setf (fvref *values* (1+ i))
	  (the short-float (svref input i)))))

(defun output-forward-pass ()
  "Assume the *VALUES* vector has been set up.  Just compute the network's
  outputs."
  (dotimes1 (j *noutputs*)
    (let ((ow (svref *output-weights* j))
	  (sum 0.0))
      (declare (short-float sum))
      (dotimes1 (i *nunits*)
	(incf-sf sum (*sf (fvref *values* i) (fvref ow i))))
      (setf (fvref *outputs* j)
	    (output-function sum)))))

(defun compute-unit-value (j)
  "Assume that *VALUES* vector has been set up for all units with index less
  than J.  Compute and record the value for unit J."
  (declare (fixnum j))
  (let* ((c (svref *connections* j))
	 (w (svref *weights* j))
	 (sum 0.0))
    (declare (short-float sum))
    (dotimes1 (i (ivref *nconnections* j))
      (incf-sf sum (*sf (fvref *values* (ivref c i))
			(fvref w i))))
    (setf (fvref *values* j) (activation sum))
    nil))

(defun full-forward-pass (input)
  "Set up the inputs from the INPUT vector, then propagate activation values
  forward through all hidden units and output units."
  (set-up-inputs input)
  ;; For each hidden unit J, compute the activation value.
  (do ((j (1+ *ninputs*) (1+ j)))
      ((= j *nunits*))
    (declare (fixnum j))
    (compute-unit-value j))
  ;; Now compute outputs.
  (output-forward-pass))

;;; Note: We fill the *ERRORS* vector and related statistics with either
;;; the raw error or the error after modification by output-prime,
;;; depending on the *RAW-ERROR* switch.  This controls what form of error
;;; the candidate units try to correlate with.  All experiments reported in
;;; TR CMU-CS-90-100 assume *RAW-ERROR* is NIL, but this might not always
;;; be the best choice.

(defun compute-errors (goal output-slopes-p stats-p)
  "GOAL is a vector of desired values for the output units.  Compute and
  record the output errors for the current training case.  If
  OUTPUT-SLOPES-P is T, then use errors to compute slopes for output
  weights.  If STATS-P is T, accumulate error statistics."
  (declare (simple-vector goal))
  (dotimes1 (j *noutputs*)
    (let* ((out (fvref *outputs* j))
	   (dif (-sf out (svref goal j)))
	   (err-prime (*sf dif (output-prime out)))
	   (os (svref *output-slopes* j)))
      (declare (short-float dif err-prime))
      (when stats-p
	(unless (< (abs dif) *score-threshold*)
	  (incf *error-bits*))
	(incf-sf *true-error* (*sf dif dif)))
      (cond (*raw-error*
	     (setf (fvref *errors* j) dif)      
	     (incf-sf *sum-error* dif)
	     (incf-sf *sum-sq-error* (*sf dif dif)))
	    (t
	     (setf (fvref *errors* j) err-prime)      
	     (incf-sf *sum-error* err-prime)
	     (incf-sf *sum-sq-error* (*sf err-prime err-prime))))
      (when output-slopes-p
	(dotimes1 (i *nunits*)
	  (incf-sf (fvref os i) (*sf err-prime (fvref *values* i))))))))

;;; Note: Scaling *OUTPUT-EPSILON* by the number of cases seems to keep the
;;; quickprop update in a good range across many different-sized training
;;; sets, but it's something of a hack.  Choosing good epsilon values
;;; still requires some trial and error.

(defun update-output-weights ()
  "Update the output weights, using the pre-computed slopes, prev-slopes,
  and delta values.  Uses the quickprop update function."
  (let ((eps (/ *output-epsilon* *ncases*)))
    (dotimes1 (j *noutputs*)
      (let ((ow (svref *output-weights* j))
	    (od (svref *output-deltas* j))
	    (os (svref *output-slopes* j))
	    (op (svref *output-prev-slopes* j)))
	(dotimes1 (i *nunits*)
	  (quickprop-update i ow od os op eps *output-decay*
			    *output-mu* *output-shrink-factor*))))))


;;;; Outer loops for training output weights.

(defun train-outputs-epoch ()
  "Perform forward propagation once for each set of weights in the
  training vectors, computing errors and slopes.  Then update the output
  weights."
  ;; Zero error accumulators.
  (setq *error-bits* 0)
  (setq *true-error* 0.0)
  (setq *sum-error* 0.0)
  (setq *sum-sq-error* 0.0)
  ;; User may have changed mu between epochs, so fix shrink-factor.
  (setq *output-shrink-factor*
	(/sf *output-mu* (+sf 1.0 *output-mu*)))
  ;; Now run through the training examples.
  (do ((i *first-case* (1+ i)))
      ((= i (the fixnum (+ *first-case* *ncases*))))
    (declare (fixnum i))
    (setq *goal* (svref *training-outputs* i))
    (cond (*use-cache*
	   (setq *values* (svref *values-cache* i))
	   (setq *errors* (svref *errors-cache* i))
	   (output-forward-pass))
	  (t (setq *values* *extra-values*)
	     (setq *errors* *extra-errors*)
	     (full-forward-pass (svref *training-inputs* i))))
    (compute-errors *goal* t t))
  ;; Do not change weights or count epoch if this run was perfect.
  (unless (= 0 *error-bits*)
    (update-output-weights)
    (incf *epoch*)))

(defun record-output-weights ()
  "Store the output weights developed after each output-training phase
  in the *output-weights-record* vector."
  (let ((record (make-array *noutputs* :initial-element nil)))
    (dotimes1 (o *noutputs*)
      (let ((original (svref *output-weights* o))
	    (copy (make-array *nunits* :element-type 'short-float
			      :initial-element 0.0)))
	(dotimes1 (u *nunits*)
	  (setf (fvref copy u) (fvref original u)))
	(setf (svref record o) copy)))
    (setf (svref *output-weights-record* (1- *nunits*)) record)))

(defun train-outputs (max-epochs)
  "Train the output weights.  If we exhaust MAX-EPOCHS, stop with value
  :TIMEOUT.  If there are zero error bits, stop with value :WIN.  Else,
  keep going until the true error has not changed by a significant amount
  for *OUTPUT-PATIENCE* epochs.  Then return :STAGNANT.  If
  *OUTPUT-PATIENCE* is zero, we do not stop until victory or until
  MAX-EPOCHS is used up."
  (declare (fixnum max-epochs))
  (let ((last-error 0.0)
	(quit-epoch (+ *epoch* *output-patience*))
	(first-time t))
    (declare (fixnum quit-epoch)
	     (short-float last-error))
    (dotimes1 (i max-epochs (progn
			     (record-output-weights)
			     :timeout))
      ;; Maybe run a test epoch to see how we're doing.
      (when (and *test*
		 (not (= 0 *test-interval*))
		 (= 0 (mod i *test-interval*)))
	   (test-epoch))
      (train-outputs-epoch)
      (cond ((zerop *error-bits*)
	     (record-output-weights)
	     (return :win))
	    ((zerop *output-patience*))
	    (first-time
	     (setq first-time nil)
	     (setq last-error *true-error*))
	    ((> (abs (- *true-error* last-error))
		(* last-error *output-change-threshold*))
	     (setq last-error *true-error*)
	     (setq quit-epoch (+ *epoch* *output-patience*)))
	    ((>= *epoch* quit-epoch)
	     (record-output-weights)
	     (return :stagnant))))))


;;;; Machinery for Training, Selecting, and Installing Candidate Units.

(defun init-candidates ()
  "Give new random weights to all of the candidate units.  Zero the other
  candidate-unit statistics."
  (dotimes1 (i *ncandidates*)
    (setf (fvref *cand-sum-values* i) 0.0)
    (let ((cw (svref *cand-weights* i))
	  (cd (svref *cand-deltas* i))
	  (cs (svref *cand-slopes* i))
	  (cp (svref *cand-prev-slopes* i))
	  (cc (svref *cand-cor* i))
	  (cpc (svref *cand-prev-cor* i)))
      (dotimes1 (j *nunits*)
	(setf (fvref cw j) (random-weight))
	(setf (fvref cd j) 0.0)
	(setf (fvref cs j) 0.0)
	(setf (fvref cp j) 0.0))
      (dotimes1 (o *noutputs*)
	(setf (fvref cc o) 0.0)
	(setf (fvref cpc o) 0.0)))))

(defun install-new-unit ()
  "Add the candidate-unit with the best correlation score to the active
  network.  Then reinitialize the candidate pool."
  (when (>= *nunits* *max-units*)
    (error "Cannot add any more units."))
  ;; For now, assume total connectivity.
  (setf (ivref *nconnections* *nunits*) *nunits*)
  (setf (svref *connections* *nunits*) *all-connections*)
  ;; Copy the weight vector for the new unit.
  (let ((w (make-array *nunits* :element-type 'short-float))
	(cw (svref *cand-weights* *best-candidate*)))
    (dotimes1 (i *nunits*)
      (setf (fvref w i) (fvref cw i)))
    (setf (svref *weights* *nunits*) w)
    ;; Tell user about the new unit.
    (format t "  Add unit ~S: ~S~%"
	    (+ 1 *nunits*) w))
  ;; Fix up output weights for candidate unit.
  ;; Use minus the correlation times the *weight-multiplier* as an
  ;; initial guess.  At least the sign should be right.
  (dotimes1 (o *noutputs*)
    (setf (fvref (svref *output-weights* o) *nunits*)
	  (*sf (-sf (fvref (svref *cand-prev-cor* *best-candidate*) o))
	       *weight-multiplier*)))
  ;; If using cache, run an epoch to compute this unit's values.
  (when *use-cache*
    (dotimes1 (i *max-cases*)
      (setq *values* (svref *values-cache* i))
      (compute-unit-value *nunits*)))
  ;; Reinitialize candidate units with random weights.
  (incf *nunits*)
  (init-candidates))

;;; Note: Ideally, after each adjustment of the candidate weights, we would
;;; run two epochs.  The first would just determine the correlations
;;; between the candidate unit outputs and the residual error.  Then, in a
;;; second pass, we would adjust each candidate's input weights so as to
;;; maximize the absolute value of the correlation.  We need to know the
;;; sign of the correlation for each candidate-output pair so that we know
;;; which direction to tune the input weights.

;;; Since this ideal method doubles the number of epochs required for
;;; training candidates, we cheat slightly and use the correlation values
;;; computed BEFORE the most recent weight update.  This combines the two
;;; epochs, saving us almost a factor of two.  To bootstrap the process, we
;;; begin with a single epoch that computes only the correlation.

;;; Since we look only at the sign of the correlation and since that sign
;;; should change very infrequently, this probably is OK.  But keep a
;;; lookout for pathological situations in which this might cause
;;; oscillation.


;;; This function is used only once at the start of each output-training
;;; phase to prime the pump.  After that, each call to compute-slopes also
;;; computes the error-value products for the next epoch.

(defun compute-correlations ()
  "For the current training pattern, compute the value of each candidate
  unit and begin to compute the correlation between that unit's value and
  the error at each output.  We have already done a forward-prop and
  computed the error values for active units."
  (dotimes1 (u *ncandidates*)
    (let ((sum 0.0)
	  (v 0.0)
	  (cw (svref *cand-weights* u))
	  (cc (svref *cand-cor* u)))
      (declare (short-float sum v))
      ;; Determine activation value of each candidate unit.
      (dotimes1 (i *nunits*)
	(incf-sf sum (*sf (fvref cw i)
			  (fvref *values* i))))
      (setq v (activation sum))
      (incf-sf (fvref *cand-sum-values* u) v)
      ;; Accumulate value of each unit times error at each output.
      (dotimes1 (o *noutputs*)
	(incf-sf (fvref cc o) (*sf v (fvref *errors* o)))))))

;;; Note: When we were computing true correlations between candidates and
;;; outputs, this is where the normalization factors went in.  Currently we
;;; are just using covariances, as explained in the tech report.  So we
;;; make only two adjustments here.  First, we subtract out the product of
;;; the mean error and the mean candidate value to keep things from
;;; exploding when the error has a non-zero mean.  Second, we effectively
;;; scale the error values by the sum-squared error over all training
;;; cases.  This just keeps us from having to adjust *input-epsilon*
;;; repeatedly as the error is gradually reduced to a small fraction of its
;;; initial size.
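;;;
;;; In equation form, the value stored for each candidate-output pair is
;;; roughly
;;;     S(o) = (sum-over-cases[v * e(o)] - sum-over-cases[v] * avg-error)
;;;            / sum-sq-error
;;; where v is the candidate's value and e(o) the error at output o.  The
;;; candidate's overall score is the sum of |S(o)| over all outputs.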

(defun adjust-correlations ()
  "Normalize each accumulated correlation value, and stuff the normalized
  form into the *cand-prev-cor* data structure.  Then zero *cand-cor* to
  prepare for the next round.  Note the unit with the best total
  correlation score."
  (setq *best-candidate* 0)
  (setq *best-candidate-score* 0.0)
  (dotimes1 (u *ncandidates*)
    (let* ((cc (svref *cand-cor* u))
	   (cpc (svref *cand-prev-cor* u))
	   (offset (*sf (fvref *cand-sum-values* u) *avg-error*))
	   (cor 0.0)
	   (score 0.0))
      (declare (short-float offset cor score))
      (dotimes1 (o *noutputs*)
	(setq cor (/sf (-sf (fvref cc o) offset) *sum-sq-error*))
	(setf (fvref cpc o) cor)
	(setf (fvref cc o) 0.0)
	(incf-sf score (abs cor)))
      ;; Keep track of the candidate with the best overall correlation.
      (when (> score *best-candidate-score*)
	(setq *best-candidate-score* score)
	(setq *best-candidate* u)))))

;;; This is the key function in the candidate training process.
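;;;
;;; On each training case, the slope accumulated for incoming weight i is
;;; roughly
;;;     ds/dw(i) = - sum-over-outputs[sign(S(o)) * f'(sum)
;;;                                   * (e(o) - avg-error) / sum-sq-error]
;;;                * value(i)
;;; where f' is ACTIVATION-PRIME and S(o) is the previous-epoch correlation,
;;; whose sign tells which way to push the candidate's value.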

(defun compute-slopes ()
  "Given the correlation values for each candidate-output pair, compute
  the derivative of the candidate's score with respect to each incoming
  weight."
  (dotimes1 (u *ncandidates*)
    (let* ((sum 0.0)
	   (value 0.0)
	   (actprime 0.0)
	   (direction 0.0)
	   (cw (svref *cand-weights* u))
	   (cs (svref *cand-slopes* u))
	   (cc (svref *cand-cor* u))
	   (cpc (svref *cand-prev-cor* u)))
      (declare (short-float sum value actprime direction))
      ;; Forward pass through each candidate unit to compute activation-prime.
      (dotimes1 (i *nunits*)
	(incf-sf sum (*sf (fvref cw i)
			  (fvref *values* i))))
      (setq value (activation sum))
      (setq actprime (activation-prime value sum))
      ;; Now compute which way we want to adjust each unit's incoming
      ;; activation.
      (dotimes1 (o *noutputs*)
	(let ((error (fvref *errors* o)))
	  (decf-sf direction
		   (*sf (if (minusp (fvref cpc o)) -1.0 1.0)
			(*sf actprime
			     (/sf (-sf error *avg-error*)
				  *sum-sq-error*))))
	  ;; Also accumulate the error-value products for use next epoch.
	  (incf-sf (fvref cc o) (*sf error value))))
      ;; Given the direction we want to push the candidate, compute
      ;; which way we want to tweak each incoming weight.
      (dotimes1 (i *nunits*)
	(incf-sf (fvref cs i)
		 (*sf direction (fvref *values* i)))))))

;;; Note: Scaling *INPUT-EPSILON* by the number of cases and number of
;;; inputs to each unit seems to keep the quickprop update in a good range,
;;; as the network goes from small to large, and across many
;;; different-sized training sets.  Still, choosing a good epsilon value
;;; requires some trial and error.

(defun update-input-weights ()
  "Update the input weights, using the pre-computed slopes, prev-slopes,
  and delta values.  Uses the quickprop update function."
  (let ((eps (/ *input-epsilon* (* *ncases* *nunits*))))
    (dotimes1 (u *ncandidates*)
      (let ((cw (svref *cand-weights* u))
	    (cd (svref *cand-deltas* u))
	    (cs (svref *cand-slopes* u))
	    (cp (svref *cand-prev-slopes* u)))
	(dotimes1 (i *nunits*)
	  (quickprop-update i cw cd cs cp eps *input-decay*
			    *input-mu* *input-shrink-factor*))))))

;;; Outer loop for training the candidate unit(s).

(defun train-inputs-epoch ()
  "For each training pattern, perform a forward pass.  Tune the candidate units'
  weights to maximize the correlation score of each."
  (do ((i *first-case* (1+ i)))
      ((= i (the fixnum (+ *first-case* *ncases*))))
    (declare (fixnum i))
    (setq *goal* (svref *training-outputs* i))
    ;; Compute values and errors, or recall cached values.
    (cond (*use-cache*
	   (setq *values* (svref *values-cache* i))
	   (setq *errors* (svref *errors-cache* i)))
	  (t (setq *values* *extra-values*)
	     (setq *errors* *extra-errors*)
	     (full-forward-pass (svref *training-inputs* i))
	     (compute-errors *goal* nil nil)))
    ;; Compute the slopes we will use to adjust candidate weights.
    (compute-slopes))
  ;; User may have changed mu between epochs, so fix shrink-factor.
  (setq *input-shrink-factor* (/sf *input-mu*
				   (+sf 1.0 *input-mu*)))
  ;; Now adjust the candidate unit input weights using quickprop.
  (update-input-weights)
  ;; Fix up the correlation values for the next epoch.
  (adjust-correlations)
  (incf *epoch*))

(defun correlations-epoch ()
  "Do an epoch through all active training patterns just to compute the
  initial correlations.  After this one pass, we will update the
  correlations as we train."
  (do ((i *first-case* (1+ i)))
      ((= i (the fixnum (+ *first-case* *ncases*))))
    (declare (fixnum i))
    (setq *goal* (svref *training-outputs* i))
    (cond (*use-cache*
	   (setq *values* (svref *values-cache* i))
	   (setq *errors* (svref *errors-cache* i)))
	  (t (setq *values* *extra-values*)
	     (setq *errors* *extra-errors*)
	     (full-forward-pass (svref *training-inputs* i))
	     (compute-errors *goal* nil nil)))
    (compute-correlations))
  (adjust-correlations)
  (incf *epoch*))

(defun train-inputs (max-epochs)
  "Train the input weights of all candidates.  If we exhaust MAX-EPOCHS,
  stop with value :TIMEOUT.  Else, keep going until the best candidate
  unit's score has changed by a significant amount, and then until it does
  not change significantly for *INPUT-PATIENCE* epochs.  Then return
  :STAGNANT.  If *INPUT-PATIENCE* is zero, we do not stop until victory or
  until MAX-EPOCHS is used up."
  (declare (fixnum max-epochs))
  (setq *avg-error* (/ *sum-error* (* *ncases* *noutputs*)))
  (correlations-epoch)
  (let ((last-score 0.0)
	(quit max-epochs)
	(first-time t))
    (declare (fixnum quit)
	     (short-float last-score))
    (dotimes1 (i max-epochs :timeout)
      (train-inputs-epoch)
      (cond ((zerop *input-patience*))
	    (first-time
	     (setq first-time nil)
	     (setq last-score *best-candidate-score*))
	    ((> (abs (-sf *best-candidate-score* last-score))
		(* last-score *input-change-threshold*))
	     (setq last-score *best-candidate-score*)
	     (setq quit (+ i *input-patience*)))
	    ((>= i quit)
	     (return :stagnant))))))
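
;;; To illustrate the patience mechanism (the epoch numbers here are made
;;; up): with *INPUT-PATIENCE* at 8 and *INPUT-CHANGE-THRESHOLD* at 0.03,
;;; if the best candidate score last changed by more than 3% at epoch 12
;;; of this phase, QUIT becomes 20; if no comparable change occurs through
;;; epoch 20, TRAIN-INPUTS returns :STAGNANT.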

;;;; Outer Loop.

(defun list-parameters ()
  "Print out the current training parameters in abbreviated form."
  (format t "SigOff ~,2F, WtRng ~,2F, WtMul ~,2F~%"
	  *sigmoid-prime-offset* *weight-range* *weight-multiplier*)
  (format t "OMu ~,2F, OEps ~,2F, ODcy ~,4F, OPat ~D, OChange ~,3F~%"
	  *output-mu* *output-epsilon* *output-decay* *output-patience*
	  *output-change-threshold*)
  (format t "IMu ~,2F, IEps ~,2F, IDcy ~,4F, IPat ~D, IChange ~,3F~%"
	  *input-mu* *input-epsilon* *input-decay* *input-patience*
	  *input-change-threshold*)
  (format t "Utype ~S, Otype ~S, RawErr ~S, Pool ~D~%"
	  *unit-type* *output-type* *raw-error* *ncandidates*))

(defun train (outlimit inlimit rounds &optional (restart nil))
  "Train the output weights until stagnation or victory is reached.  Then
  train the input weights to stagnation or victory.  Then install the best
  candidate unit and repeat.  OUTLIMIT and INLIMIT are upper limits on the number
  of cycles in each output and input phase.  ROUNDS is an upper limit on
  the number of unit-installation cycles.  If RESTART is non-nil, we are
  restarting training from the current point -- do not reinitialize the net."
  (declare (fixnum outlimit inlimit rounds))
  (unless restart (init-net))
  (list-parameters)
  (when *use-cache*
    (dotimes1 (i *max-cases*)
      (setq *values* (svref *values-cache* i))
      (set-up-inputs (svref *training-inputs* i))))
  (dotimes1 (r rounds  :lose)
    (case (train-outputs outlimit)
      (:win
       (list-parameters)
       (format t "Victory at ~S epochs, ~S units, ~S hidden, Error ~S.~%"
	       *epoch* *nunits* (- *nunits* *ninputs* 1) *true-error*)
       (return nil))
      (:timeout
       (format t "Epoch ~D: Out Timeout  ~D bits wrong, error ~S.~2%"
	       *epoch* *error-bits* *true-error*))
      (:stagnant
       (format t "Epoch ~D: Out Stagnant ~D bits wrong, error ~S.~2%"
	       *epoch* *error-bits* *true-error*)))
    (when *test* (test-epoch))
    (case (train-inputs inlimit)
      (:timeout
       (format t "Epoch ~D: In Timeout.  Cor: ~D~%"
	       *epoch* *best-candidate-score*))
      (:stagnant
       (format t "Epoch ~D: In Stagnant.  Cor: ~D~%"
	       *epoch* *best-candidate-score*)))
    (install-new-unit)))
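
;;; If a TRAIN run times out while still making progress, it can be
;;; resumed from the current weights by passing a non-null RESTART
;;; argument, e.g. (TRAIN 100 100 25 T).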

(defun test-epoch (&optional (*score-threshold* 0.49999))
  "Perform forward propagation once for each set of weights in the training
  and testing vectors.  Reporting the performance.  Do not change any
  weights.  Do not use the caches."
  (let ((*use-cache* nil)
	(*values* *extra-values*)
	(*errors* *extra-errors*)
	(*error-bits* 0)
	(*true-error* 0.0)
	(*sum-error* 0.0)
	(*sum-sq-error* 0.0))
    ;; Run all training patterns and count errors.
    (dotimes1 (i (length *training-inputs*))
      (setq *goal* (svref *training-outputs* i))
      (full-forward-pass (svref *training-inputs* i))
      (compute-errors *goal* nil t))
    (format t "Training: ~D of ~D wrong, error ~S."
	    *error-bits* (length *training-inputs*) *true-error*)
    ;; Zero some accumulators again.
    (setq *error-bits* 0)
    (setq *true-error* 0.0)
    (setq *sum-error* 0.0)
    (setq *sum-sq-error* 0.0)
    ;; Now run all test patterns and report the results.
    (when *test-inputs*
      (dotimes1 (i (length *test-inputs*))
	(setq *goal* (svref *test-outputs* i))
	(full-forward-pass (svref *test-inputs* i))
	(compute-errors *goal* nil t))
      ;; Report test performance only when a test set actually exists.
      (format t "  Test: ~D of ~D wrong, error ~S."
	      *error-bits* (length *test-inputs*) *true-error*))
    (terpri)))
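
;;; Since TEST-EPOCH rebinds *SCORE-THRESHOLD*, a stricter or looser
;;; scoring margin can be supplied directly, e.g. (TEST-EPOCH 0.4) to
;;; demand that each output come within 0.4 of its target before being
;;; counted as correct.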

(defun test-setup (nunits weights output-weights)
  "Set up a network for testing, given stored weights and output weights."
  (init-net)
  (setq *weights* weights)
  (setq *output-weights* output-weights)
  (setq *nunits* nunits)
  (do ((i (1+ *ninputs*) (1+ i)))
      ((= i *nunits*))
    (declare (fixnum i))
    (setf (ivref *nconnections* i) i)
    (setf (svref *connections* i) *all-connections*)))
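
;;; A hypothetical usage sketch for TEST-SETUP (the *SAVED-* variables are
;;; illustrative, not part of this file): snapshot the learned state after
;;; a successful TRAIN run, then rebuild a test-only net from the snapshot
;;; and score it.  In a real session you would probably copy the weight
;;; vectors rather than hold references, since INIT-NET may reinitialize
;;; them in place.
;;;
;;;   (defvar *saved-nunits* *nunits*)
;;;   (defvar *saved-weights* *weights*)
;;;   (defvar *saved-output-weights* *output-weights*)
;;;   ...
;;;   (test-setup *saved-nunits* *saved-weights* *saved-output-weights*)
;;;   (test-epoch)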


;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;; Example Applications ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;

;;; Zig-Zag problem.  An easy one, useful for testing the code.

(defun build-zig-zag (n)
  "Build N pairs of 1-D zig-zag."
  (declare (fixnum n))
  (setq *ninputs* 1)
  (setq *noutputs* 1)
  (let ((ti (make-array (* 2 n)))
	(to (make-array (* 2 n))))
    (dotimes1 (i n)
      (setf (svref ti (* i 2))
	    (vector (+ i 1.0)))
      (setf (svref to (* i 2))
	    (vector (if (evenp i) 0.5 -0.5)))
      (setf (svref ti (1+ (* i 2)))
	    (vector (- (+ i 1.0))))
      (setf (svref to (1+ (* i 2)))
	    (vector (if (evenp i) -0.5 0.5))))
    (setq *training-inputs* ti)
    (setq *training-outputs* to)
    (setq *test-inputs* ti)
    (setq *test-outputs* to))
  (build-net 1 1)
  (init-net))

;;; Call this with something like (BUILD-ZIG-ZAG 4), then call
;;; something like (TRAIN 100 100 25).


;;; Two spirals problem.

(defun build-two-spirals (&optional (n 97))
  "Build N point-pairs of the two-spiral problem, with standard default
  of 97 pairs."
  (declare (fixnum n))
  (setq *ninputs* 2)
  (setq *noutputs* 1)
  (let ((ti (make-array (* 2 n)))
	(to (make-array (* 2 n))))
    (dotimes1 (i n)
      (let* ((angle (/ (* i (coerce pi 'short-float)) 16.0))
	     (radius (/ (* 6.5 (- 104.0 i)) 104))
	     (x (* radius (sin angle)))
	     (y (* radius (cos angle))))
	(setf (svref ti (* i 2))
	      (vector x y))
	(setf (svref to (* i 2))
	      (vector 0.5))
	(setf (svref ti (1+ (* i 2)))
	      (vector (- x) (- y)))
	(setf (svref to (1+ (* i 2)))
	      (vector -0.5))))
    ;; Put the inner part of the spiral first on the list.
    (setq ti (nreverse ti))
    (setq to (nreverse to))
    (setq *training-inputs* ti)
    (setq *training-outputs* to)
    (setq *test-inputs* ti)
    (setq *test-outputs* to))
  (build-net 2 1)
  (init-net))

;;; To run this, call (BUILD-TWO-SPIRALS), set various control parameters,
;;; and then call something like (TRAIN 100 100 25).

;;; For parameters, try these:
;;; SigOff 0.10, WtRng 1.00, WtMul 1.00
;;; OMu 2.00, OEps 1.00, ODcy 0.0001, OPat 12, OChange 0.010
;;; IMu 2.00, IEps 100.00, IDcy 0.00000, IPat 8, IChange 0.030
;;; Utype :SIGMOID, Otype :SIGMOID, RawErr NIL, Pool 8
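
;;; The helper below is not part of the original interface; it is a
;;; convenience sketch that installs the values suggested above, using the
;;; same global variables that LIST-PARAMETERS prints.  Call it before
;;; BUILD-TWO-SPIRALS, since some of these values (e.g. the candidate pool
;;; size) may feed into network allocation.

(defun set-two-spirals-parameters ()
  "Install the control parameters suggested above for two-spirals."
  (setq *sigmoid-prime-offset* 0.1)
  (setq *weight-range* 1.0)
  (setq *weight-multiplier* 1.0)
  (setq *output-mu* 2.0)
  (setq *output-epsilon* 1.0)
  (setq *output-decay* 0.0001)
  (setq *output-patience* 12)
  (setq *output-change-threshold* 0.01)
  (setq *input-mu* 2.0)
  (setq *input-epsilon* 100.0)
  (setq *input-decay* 0.0)
  (setq *input-patience* 8)
  (setq *input-change-threshold* 0.03)
  (setq *unit-type* :sigmoid)
  (setq *output-type* :sigmoid)
  (setq *raw-error* nil)
  (setq *ncandidates* 8))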

;;; Saved random state, so that timing runs are repeatable.
(defvar *save-random-state*
  (make-random-state))

(defun time-two-spirals ()
  "Run the two-spirals problem from the saved random state and report timing."
  (setq *random-state* (make-random-state *save-random-state*))
  (build-two-spirals)
  (time (train 100 100 25)))
;;; The End.