/*
 * Encog(tm) Examples v2.4
 * http://www.heatonresearch.com/encog/
 * http://code.google.com/p/encog-java/
 *
 * Copyright 2008-2010 by Heaton Research Inc.
 *
 * Released under the LGPL.
 *
 * This is free software; you can redistribute it and/or modify it
 * under the terms of the GNU Lesser General Public License as
 * published by the Free Software Foundation; either version 2.1 of
 * the License, or (at your option) any later version.
 *
 * This software is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this software; if not, write to the Free
 * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA, or see the FSF site: http://www.fsf.org.
 *
 * Encog and Heaton Research are Trademarks of Heaton Research, Inc.
 * For information on Heaton Research trademarks, visit:
 *
 * http://www.heatonresearch.com/copyright.html
 */
package org.encog.examples.neural.recurrent.jordan;

import org.encog.engine.network.activation.ActivationTANH;
import org.encog.engine.util.ErrorCalculation;
import org.encog.engine.util.ErrorCalculationMode;
import org.encog.examples.neural.util.TemporalXOR;
import org.encog.neural.data.NeuralDataSet;
import org.encog.neural.networks.BasicNetwork;
import org.encog.neural.networks.training.CalculateScore;
import org.encog.neural.networks.training.Train;
import org.encog.neural.networks.training.TrainingSetScore;
import org.encog.neural.networks.training.anneal.NeuralSimulatedAnnealing;
import org.encog.neural.networks.training.propagation.Propagation;
import org.encog.neural.networks.training.propagation.back.Backpropagation;
import org.encog.neural.networks.training.strategy.Greedy;
import org.encog.neural.networks.training.strategy.HybridStrategy;
import org.encog.neural.networks.training.strategy.StopTrainingStrategy;
import org.encog.neural.pattern.FeedForwardPattern;
import org.encog.neural.pattern.JordanPattern;
import org.encog.util.logging.Logging;

/**
 * Implement a Jordan-style neural network with Encog. This network attempts to
 * predict the next value in an XOR sequence, taken one value at a time. A
 * regular feedforward network would fail at this task with a single input
 * neuron; the internal state stored by a Jordan neural network allows better
 * performance. Elman networks are more typically used for temporal problems; a
 * Jordan network instead has a single context layer connected to the output
 * layer.
 */
public class JordanXOR {

    static BasicNetwork createJordanNetwork() {
        // construct a Jordan-type network: 1 input, 6 hidden, 1 output,
        // with a context layer fed from the output layer
        JordanPattern pattern = new JordanPattern();
        pattern.setActivationFunction(new ActivationTANH());
        pattern.setInputNeurons(1);
        pattern.addHiddenLayer(6);
        pattern.setOutputNeurons(1);
        return pattern.generate();
    }

    static BasicNetwork createFeedforwardNetwork() {
        // construct a plain feedforward network for comparison
        FeedForwardPattern pattern = new FeedForwardPattern();
        pattern.setActivationFunction(new ActivationTANH());
        pattern.setInputNeurons(1);
        pattern.addHiddenLayer(2);
        pattern.setOutputNeurons(1);
        return pattern.generate();
    }

    public static void main(final String args[]) {
        Logging.stopConsoleLogging();
        ErrorCalculation.setMode(ErrorCalculationMode.RMS);

        final TemporalXOR temp = new TemporalXOR();
        final NeuralDataSet trainingSet = temp.generate(120);

        final BasicNetwork jordanNetwork = JordanXOR.createJordanNetwork();
        final BasicNetwork feedforwardNetwork = JordanXOR
                .createFeedforwardNetwork();

        final double jordanError = JordanXOR.trainNetwork("Jordan",
                jordanNetwork, trainingSet);

        final double feedforwardError = JordanXOR.trainNetwork("Feedforward",
                feedforwardNetwork, trainingSet);

        System.out.println("Best error rate with Jordan Network: "
                + jordanError);
        System.out.println("Best error rate with Feedforward Network: "
                + feedforwardError);
        System.out
                .println("Jordan should be able to get into the 30% range,\nfeedforward should not go below 50%.\nThe recurrent Jordan net can learn better in this case.");
        System.out
                .println("If your results are not as good, try rerunning, or perhaps training longer.");
    }

    public static double trainNetwork(final String what,
            final BasicNetwork network, final NeuralDataSet trainingSet) {
        // train the neural network: backpropagation is the main trainer,
        // with simulated annealing as the alternate trainer used by the
        // hybrid strategy when progress stalls
        CalculateScore score = new TrainingSetScore(trainingSet);
        final Train trainAlt = new NeuralSimulatedAnnealing(network, score,
                10, 2, 100);

        final Train trainMain = new Backpropagation(network, trainingSet,
                0.000001, 0.0);
        ((Propagation) trainMain).setNumThreads(1);

        final StopTrainingStrategy stop = new StopTrainingStrategy();
        trainMain.addStrategy(new Greedy());
        trainMain.addStrategy(new HybridStrategy(trainAlt));
        trainMain.addStrategy(stop);

        int epoch = 0;
        while (!stop.shouldStop()) {
            trainMain.iteration();
            System.out.println("Training " + what + ", Epoch #" + epoch
                    + " Error:" + trainMain.getError());
            epoch++;
        }
        return trainMain.getError();
    }
}