/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.commons.math4.neuralnet.sofm.util;

import java.util.function.LongToDoubleFunction;

import org.apache.commons.math4.neuralnet.internal.NeuralNetException;
/**
 * Exponential decay function: <code>a e<sup>-x / b</sup></code>,
 * where {@code x} is the (integer) independent variable.
 * <br>
 * Class is immutable.
 *
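 * <p>
 * A typical use is as a decay schedule, e.g. for a learning rate.
 * The snippet below is a minimal usage sketch; the values are
 * illustrative only:
 * <pre>{@code
 * // Decays from 0.1 at call 0 to 0.01 at call 1000.
 * LongToDoubleFunction learningRate
 *     = new ExponentialDecayFunction(0.1, 0.01, 1000);
 * learningRate.applyAsDouble(0);    // 0.1
 * learningRate.applyAsDouble(500);  // ~0.0316
 * learningRate.applyAsDouble(1000); // ~0.01
 * }</pre>
 *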
 * @since 3.3
 */
public class ExponentialDecayFunction implements LongToDoubleFunction {
    /** Factor {@code a}. */
    private final double a;
    /** Factor {@code 1 / b}. */
    private final double oneOverB;

    /**
     * Creates an instance. It will be such that
     * <ul>
     *  <li>{@code a = initValue}</li>
     *  <li>{@code b = -numCall / ln(valueAtNumCall / initValue)}</li>
     * </ul>
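     * <p>
     * A worked example of this parameterization (numbers purely
     * illustrative):
     * <pre>{@code
     * // a = 1, b = -100 / ln(0.1 / 1), i.e. b is about 43.43.
     * ExponentialDecayFunction f = new ExponentialDecayFunction(1, 0.1, 100);
     * f.applyAsDouble(0);   // 1.0
     * f.applyAsDouble(100); // 0.1 (up to floating-point rounding)
     * }</pre>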
     *
     * @param initValue Initial value, i.e. {@link #applyAsDouble(long) applyAsDouble(0)}.
     * @param valueAtNumCall Value of the function at {@code numCall}.
     * @param numCall Argument for which the function returns
     * {@code valueAtNumCall}.
     * @throws IllegalArgumentException if {@code initValue <= 0},
     * {@code valueAtNumCall <= 0}, {@code valueAtNumCall >= initValue} or
     * {@code numCall <= 0}.
     */
    public ExponentialDecayFunction(double initValue,
                                    double valueAtNumCall,
                                    long numCall) {
        if (initValue <= 0) {
            throw new NeuralNetException(NeuralNetException.NOT_STRICTLY_POSITIVE, initValue);
        }
        if (valueAtNumCall <= 0) {
            throw new NeuralNetException(NeuralNetException.NOT_STRICTLY_POSITIVE, valueAtNumCall);
        }
        if (valueAtNumCall >= initValue) {
            throw new NeuralNetException(NeuralNetException.TOO_LARGE, valueAtNumCall, initValue);
        }
        if (numCall <= 0) {
            throw new NeuralNetException(NeuralNetException.NOT_STRICTLY_POSITIVE, numCall);
        }

        a = initValue;
        // 1 / b, chosen so that applyAsDouble(numCall) == valueAtNumCall.
        oneOverB = -Math.log(valueAtNumCall / initValue) / numCall;
    }

    /**
     * Computes <code>a e<sup>-numCall / b</sup></code>.
     *
     * @param numCall Current step of the training task.
     * @return the value of the function at {@code numCall}.
     */
    @Override
    public double applyAsDouble(long numCall) {
        return a * Math.exp(-numCall * oneOverB);
    }
}