001    /*

002     * JNI_SVM-light - A Java Native Interface for SVM-light

003     * 

004     * Copyright (C) 2005 

005     * Tom Crecelius & Martin Theobald 

006     * Max-Planck Institute for Computer Science

007     * 

008     * This program is free software; you can redistribute it and/or modify it under

009     * the terms of the GNU General Public License as published by the Free Software

010     * Foundation.

011     * 

012     * This program is distributed in the hope that it will be useful, but WITHOUT

013     * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS

014     * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more

015     * details.

016     * 

017     * You should have received a copy of the GNU General Public License along with

018     * this program; if not, write to the Free Software Foundation, Inc., 51

019     * Franklin St, Fifth Floor, Boston, MA 02110-1301 USA

020     */

021    

022    package jnisvmlight;

023    

024    /**

025     * Learning parameters as denoted by SVM-light.

026     * 

027     * @author Tom Crecelius & Martin Theobald

028     */

public class LearnParam {

  /** Trains a classification model. */
  public static final int CLASSIFICATION = 1;

  /** Trains on a general set of constraints. */
  public static final int OPTIMIZATION = 4;

  /** Trains a ranking model. */
  public static final int RANKING = 3;

  /** Trains a regression model. */
  public static final int REGRESSION = 2;

  /**
   * File to store optimal alphas in. Use an empty string if alphas should not
   * be output.
   */
  public String alphafile;

  /** The cardinality of the command line parameters. */
  public int argc = 0;

  /**
   * Optionally simulates a simple command shell-like usage and transfers the
   * command line parameters to SVM-light.
   */
  public String[] argv;

  /** If nonzero, use hyperplane w*x+b=0, otherwise w*x=0. */
  public long biased_hyperplane;

  /** If nonzero, computes leave-one-out estimates. */
  public long compute_loo;

  /** Regression epsilon (eps=1.0 for classification). */
  public double eps;

  /** Tolerable error on alphas at bounds. */
  public double epsilon_a;

  /** Tolerable error on eq-constraint. */
  public double epsilon_const;

  /** Tolerable error for distances used in stopping criterion. */
  public double epsilon_crit;

  /** How much a multiplier should be above zero for shrinking. */
  public double epsilon_shrink;

  /** Size of kernel cache in megabytes. */
  public long kernel_cache_size;

  /**
   * Number of iterations after which the optimizer terminates, if there was no
   * progress in maxdiff.
   */
  public long maxiter;

  /** Precision of solver, set to e.g. 1e-21 if you get convergence problems. */
  public double opt_precision;

  /** File for predictions on unlabeled examples in transduction. */
  public String predfile;

  /** Exclude examples with alpha at C and retrain. */
  public long remove_inconsistent;

  /** Parameter in xi/alpha-estimates and for pruning leave-one-out range [1..2]. */
  public double rho;

  /**
   * If nonzero, it will use the shared slack variable mode. In
   * svm_learn_optimization it requires that the slack-id is set for every
   * training example.
   */
  public long sharedslack;

  /**
   * Do not check KT-Conditions at the end of optimization for examples removed
   * by shrinking. WARNING: This might lead to sub-optimal solutions!
   */
  public long skip_final_opt_check;

  /** Upper bound C on alphas. */
  public double svm_c;

  /** Increase C by this factor every step. */
  public double svm_c_factor;

  /** Do so many steps for finding optimal C. */
  public long svm_c_steps;

  /** Individual upper bounds for each var. */
  public double svm_cost;

  /** Factor to multiply C for positive examples. */
  public double svm_costratio;

  /** Factor to multiply C for unlabeled examples (transduction). */
  public double svm_costratio_unlab;

  /* You probably do not want to touch the following: */

  /** Iterations h after which an example can be removed by shrinking. */
  public long svm_iter_to_shrink;

  /** Size q of working set. */
  public long svm_maxqpsize;

  /** New variables to enter the working set in each iteration. */
  public long svm_newvarsinqp;

  /* The following are only for internal use: */

  /** Bound on unlabeled examples (internal, transduction). */
  public double svm_unlabbound;

  /** Total amount of features. */
  public long totwords;

  /** Fraction of unlabeled examples to be classified as positives. */
  public double transduction_posratio;

  /** Selects between CLASSIFICATION, REGRESSION, RANKING, or OPTIMIZATION mode. */
  public long type;

  /** The level of SVM-light debugging infos. */
  public int verbosity;

  /**
   * Parameter in xi/alpha-estimates upper bounding the number of SV the current
   * alpha_t is distributed over.
   */
  public long xa_depth;

  /**
   * Initializes the learning parameters with the default SVM-light values.
   * These mirror the defaults of the native svm_learn command line tool;
   * fields not assigned here keep their Java zero-defaults.
   */
  public LearnParam() {
    this.verbosity = 0;
    this.type = CLASSIFICATION;
    // String literals instead of 'new String(...)': identical value, no
    // redundant allocation.
    this.predfile = "trans_predictions";
    this.alphafile = "";
    this.biased_hyperplane = 1;
    this.sharedslack = 0;
    this.remove_inconsistent = 0;
    this.skip_final_opt_check = 0;
    this.svm_maxqpsize = 10;
    this.svm_newvarsinqp = 0;
    // Negative sentinel: SVM-light substitutes its own default when < 0.
    this.svm_iter_to_shrink = -9999;
    this.maxiter = 100000;
    this.kernel_cache_size = 40;
    this.svm_c = 0.0;
    this.eps = 0.1;
    this.transduction_posratio = -1.0;
    this.svm_costratio = 1.0;
    this.svm_costratio_unlab = 1.0;
    this.svm_unlabbound = 1E-5;
    this.epsilon_crit = 0.001;
    this.epsilon_a = 1E-15;
    this.compute_loo = 0;
    this.rho = 1.0;
    this.xa_depth = 0;
  }
}