/*
 * Copyright 2014 Red Hat, Inc. and/or its affiliates.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.optaplanner.core.api.score.buildin.hardmediumsoftlong;

import org.kie.api.runtime.rule.RuleContext;
import org.optaplanner.core.api.score.Score;
import org.optaplanner.core.api.score.holder.AbstractScoreHolder;

/**
 * @see HardMediumSoftLongScore
 */
public class HardMediumSoftLongScoreHolder extends AbstractScoreHolder {

    protected long hardScore;
    protected long mediumScore;
    protected long softScore;

    public HardMediumSoftLongScoreHolder(boolean constraintMatchEnabled) {
        super(constraintMatchEnabled, HardMediumSoftLongScore.ZERO);
    }

    public long getHardScore() {
        return hardScore;
    }

    public long getMediumScore() {
        return mediumScore;
    }

    public long getSoftScore() {
        return softScore;
    }

    // ************************************************************************
    // Worker methods
    // ************************************************************************

    /**
     * @param kcontext never null, the magic variable in DRL
     * @param hardWeight higher is better, negative for a penalty, positive for a reward
     */
    public void addHardConstraintMatch(RuleContext kcontext, long hardWeight) {
        hardScore += hardWeight;
        registerConstraintMatch(kcontext,
                () -> hardScore -= hardWeight,
                () -> HardMediumSoftLongScore.valueOf(hardWeight, 0L, 0L));
    }

    /**
     * @param kcontext never null, the magic variable in DRL
     * @param mediumWeight higher is better, negative for a penalty, positive for a reward
     */
    public void addMediumConstraintMatch(RuleContext kcontext, long mediumWeight) {
        mediumScore += mediumWeight;
        registerConstraintMatch(kcontext,
                () -> mediumScore -= mediumWeight,
                () -> HardMediumSoftLongScore.valueOf(0L, mediumWeight, 0L));
    }

    /**
     * @param kcontext never null, the magic variable in DRL
     * @param softWeight higher is better, negative for a penalty, positive for a reward
     */
    public void addSoftConstraintMatch(RuleContext kcontext, long softWeight) {
        softScore += softWeight;
        registerConstraintMatch(kcontext,
                () -> softScore -= softWeight,
                () -> HardMediumSoftLongScore.valueOf(0L, 0L, softWeight));
    }

    /**
     * @param kcontext never null, the magic variable in DRL
     * @param hardWeight higher is better, negative for a penalty, positive for a reward
     * @param mediumWeight higher is better, negative for a penalty, positive for a reward
     * @param softWeight higher is better, negative for a penalty, positive for a reward
     */
    public void addMultiConstraintMatch(RuleContext kcontext, long hardWeight, long mediumWeight, long softWeight) {
        hardScore += hardWeight;
        mediumScore += mediumWeight;
        softScore += softWeight;
        registerConstraintMatch(kcontext,
                () -> {
                    hardScore -= hardWeight;
                    mediumScore -= mediumWeight;
                    softScore -= softWeight;
                },
                () -> HardMediumSoftLongScore.valueOf(hardWeight, mediumWeight, softWeight));
    }

    @Override
    public Score extractScore(int initScore) {
        return HardMediumSoftLongScore.valueOfUninitialized(initScore, hardScore, mediumScore, softScore);
    }

}
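
// A minimal usage sketch, not part of the original file: in a score rules DRL, the holder is
// typically exposed as a global and the worker methods are called from a rule's right-hand side
// with the implicit kcontext variable. The rule name, pattern, and weight below are hypothetical.
//
//   global HardMediumSoftLongScoreHolder scoreHolder;
//
//   rule "exampleHardConstraint"
//   when
//       // ... pattern that detects a broken hard constraint ...
//   then
//       scoreHolder.addHardConstraintMatch(kcontext, -1L);
//   end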