/*
 * Encog(tm) Core v3.4 - Java Version
 * http://www.heatonresearch.com/encog/
 * https://github.com/encog/encog-java-core
 *
 * Copyright 2008-2016 Heaton Research, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * For more information on Heaton Research copyrights, licenses
 * and trademarks visit:
 * http://www.heatonresearch.com/copyright
 */
package org.encog.neural.networks.training.propagation;

/**
 * An owner of gradient worker threads. Implementations receive the gradients
 * and error calculated by each worker and supply the regularization penalties
 * to apply during training.
 */
public interface GradientWorkerOwner {

	/**
	 * Called by the worker threads to report the progress at each step.
	 *
	 * @param gradients
	 *            The gradients from that worker.
	 * @param error
	 *            The error for that worker.
	 * @param ex
	 *            The exception, if one occurred.
	 */
	void report(double[] gradients, double error, Throwable ex);

	/**
	 * @return How much L1 regularization penalty to apply, 0 (the default) for
	 *         none.
	 */
	double getL1();

	/**
	 * @return How much L2 regularization penalty to apply, 0 (the default) for
	 *         none.
	 */
	double getL2();
}
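
/*
 * Illustrative sketch only, not part of the Encog API: a minimal owner that
 * accumulates the gradients reported by its workers and tracks the average
 * error. The accumulation strategy, the class name, and the fixed penalty
 * values below are assumptions made for this example.
 */
class ExampleGradientWorkerOwner implements GradientWorkerOwner {

	/** Accumulated gradients from all workers. */
	private final double[] totalGradients;

	/** Sum of the errors reported so far. */
	private double errorSum;

	/** Number of reports received so far. */
	private int reportCount;

	ExampleGradientWorkerOwner(final int gradientCount) {
		this.totalGradients = new double[gradientCount];
	}

	@Override
	public void report(final double[] gradients, final double error,
			final Throwable ex) {
		if (ex != null) {
			throw new RuntimeException("Gradient worker failed", ex);
		}
		synchronized (this) {
			for (int i = 0; i < gradients.length; i++) {
				this.totalGradients[i] += gradients[i];
			}
			this.errorSum += error;
			this.reportCount++;
		}
	}

	/** @return The average error reported so far, or 0 if nothing reported. */
	public synchronized double getAverageError() {
		return this.reportCount == 0 ? 0 : this.errorSum / this.reportCount;
	}

	@Override
	public double getL1() {
		return 0.0001; // example L1 penalty; 0 would disable it
	}

	@Override
	public double getL2() {
		return 0.001; // example L2 penalty; 0 would disable it
	}
}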