SHOGUN v0.9.0
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 3 of the License, or
 * (at your option) any later version.
 *
 * Written (W) 2007-2009 Soeren Sonnenburg
 * Copyright (C) 2007-2009 Fraunhofer Institute FIRST and Max-Planck-Society
 */

#include "lib/config.h"

#ifdef USE_CPLEX

#include "classifier/LPBoost.h"
#include "features/Labels.h"
#include "lib/Mathematics.h"
#include "lib/Cplex.h"
#include "lib/DynamicArray.h"
#include "lib/Signal.h"
#include "lib/Time.h"

using namespace shogun;

CLPBoost::CLPBoost()
: CLinearClassifier(), C1(1), C2(1), use_bias(true), epsilon(1e-3)
{
	u=NULL;
	dim=NULL;
	num_sfeat=0;
	num_svec=0;
	sfeat=NULL;
}

CLPBoost::~CLPBoost()
{
	cleanup();
}

/* Allocate the (initially uniform) distribution u over the training examples
 * and fetch the sparse features in transposed, i.e. feature-major, form. */
bool CLPBoost::init(int32_t num_vec)
{
	u=new float64_t[num_vec];
	for (int32_t i=0; i<num_vec; i++)
		u[i]=1.0/num_vec;

	dim=new CDynamicArray<int32_t>(100000);

	sfeat=((CSparseFeatures<float64_t>*) features)->get_transposed(num_sfeat, num_svec);

	return sfeat!=NULL;
}

void CLPBoost::cleanup()
{
	delete[] u;
	u=NULL;

	((CSparseFeatures<float64_t>*) features)->clean_tsparse(sfeat, num_svec);
	sfeat=NULL;

	delete dim;
	dim=NULL;
}

/* Return the weak hypothesis with the largest edge sum_i u_i*y_i*x_ij under the
 * current distribution u. Every feature dimension and its negation are candidate
 * hypotheses; indices >= num_svec encode the negated dimension. The chosen index
 * is appended to dim so that train() can later map LP multipliers back to w. */
float64_t CLPBoost::find_max_violator(int32_t& max_dim)
{
	float64_t max_val=0;
	max_dim=-1;

	for (int32_t i=0; i<num_svec; i++)
	{
		float64_t valplus=0;
		float64_t valminus=0;

		for (int32_t j=0; j<sfeat[i].num_feat_entries; j++)
		{
			int32_t idx=sfeat[i].features[j].feat_index;
			float64_t v=u[idx]*labels->get_label(idx)*sfeat[i].features[j].entry;
			valplus+=v;
			valminus-=v;
		}

		if (valplus>max_val || max_dim==-1)
		{
			max_dim=i;
			max_val=valplus;
		}

		if (valminus>max_val)
		{
			max_dim=num_svec+i;
			max_val=valminus;
		}
	}

	dim->append_element(max_dim);
	return max_val;
}

bool CLPBoost::train(CFeatures* data)
{
	/* data is unused here; features and labels must have been set beforehand */
	ASSERT(labels);
	ASSERT(features);
	int32_t num_train_labels=labels->get_num_labels();
	int32_t num_feat=features->get_dim_feature_space();
	int32_t num_vec=features->get_num_vectors();

	ASSERT(num_vec==num_train_labels);
	delete[] w;
	w=new float64_t[num_feat];
	memset(w, 0, sizeof(float64_t)*num_feat);
	w_dim=num_feat;

	/* set up the LP master problem in CPLEX */
	CCplex solver;
	solver.init(E_LINEAR);
	SG_PRINT("setting up lpboost\n");
	solver.setup_lpboost(C1, num_vec);
	SG_PRINT("finished setting up lpboost\n");

	bool result=init(num_vec);
	ASSERT(result);

	int32_t num_hypothesis=0;
	CTime time;
	CSignal::clear_cancel();

	/* column generation: add the most violated constraint and re-solve the LP
	 * until no hypothesis violates its constraint by more than epsilon */
	while (!(CSignal::cancel_computations()))
	{
		int32_t max_dim=0;
		float64_t violator=find_max_violator(max_dim);
		SG_PRINT("iteration:%06d violator: %10.17f (>1.0) chosen: %d\n", num_hypothesis, violator, max_dim);

		if (violator <= 1.0+epsilon && num_hypothesis>1) // no constraint violated
		{
			SG_PRINT("converged after %d iterations!\n", num_hypothesis);
			break;
		}

		float64_t factor=+1.0;
		if (max_dim>=num_svec) // index encodes the negated feature dimension
		{
			factor=-1.0;
			max_dim-=num_svec;
		}

		TSparseEntry<float64_t>* h=sfeat[max_dim].features;
		int32_t len=sfeat[max_dim].num_feat_entries;
		solver.add_lpboost_constraint(factor, h, len, num_vec, labels);
		solver.optimize(u);
		//CMath::display_vector(u, num_vec, "u");
		num_hypothesis++;

		if (get_max_train_time()>0 && time.cur_time_diff()>get_max_train_time())
			break;
	}

	/* the final solve also returns the constraint multipliers lambda, from which
	 * the linear weight vector w is assembled; the sign depends on whether the
	 * original or the negated feature dimension was chosen */
	float64_t* lambda=new float64_t[num_hypothesis];
	solver.optimize(u, lambda);

	//CMath::display_vector(lambda, num_hypothesis, "lambda");
	for (int32_t i=0; i<num_hypothesis; i++)
	{
		int32_t d=dim->get_element(i);
		if (d>=num_svec)
			w[d-num_svec]+=lambda[i];
		else
			w[d]-=lambda[i];
	}

	//solver.write_problem("problem.lp");
	delete[] lambda;
	solver.cleanup();

	cleanup();

	return true;
}
#endif
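For orientation, below is a minimal usage sketch of the class implemented above. It is not part of the SHOGUN sources: it assumes a CPLEX-enabled build (USE_CPLEX defined) and the libshogun C++ API of this release. In particular, building the sparse features via the cache-size constructor and set_full_feature_matrix is an assumption that may need adjusting; set_features, set_labels and train correspond to what CLPBoost inherits from CLinearClassifier and CClassifier.

#include "lib/common.h"
#include "base/init.h"
#include "features/SparseFeatures.h"
#include "features/Labels.h"
#include "classifier/LPBoost.h"

using namespace shogun;

int main()
{
	init_shogun();

	// toy data: two features per example, four examples (one column per example)
	float64_t feat_matrix[8]={ 1,0,  0,1,  1,1,  0,0 };
	float64_t lab[4]={ +1, -1, +1, -1 };

	// assumed construction calls; the exact CSparseFeatures setup may differ
	CSparseFeatures<float64_t>* feats=new CSparseFeatures<float64_t>(0);
	feats->set_full_feature_matrix(feat_matrix, 2, 4);

	CLabels* labels=new CLabels(4);
	for (int32_t i=0; i<4; i++)
		labels->set_label(i, lab[i]);

	// C1, C2 and epsilon keep their constructor defaults (1, 1, 1e-3)
	CLPBoost* lpb=new CLPBoost();
	lpb->set_features(feats);
	lpb->set_labels(labels);
	lpb->train(); // runs the column-generation loop shown above

	SG_UNREF(lpb);
	exit_shogun();
	return 0;
}

After training, the learned linear weights live in the w vector maintained by CLinearClassifier and can be applied to test data through the usual classifier interface.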