SVM_linear.cpp

#include "lib/config.h"

#ifndef DOXYGEN_SHOULD_SKIP_THIS
#ifdef HAVE_LAPACK
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdarg.h>

#include "classifier/svm/SVM_linear.h"
#include "classifier/svm/Tron.h"

using namespace shogun;

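/* Function object for L2-regularized logistic regression, implementing the
 * fun()/grad()/Hv() interface consumed by the trust-region Newton solver
 * declared in Tron.h. Cp and Cn are the regularization constants applied to
 * positive and negative examples; z, D and C are per-example work buffers.
 */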
l2_lr_fun::l2_lr_fun(const problem *p, float64_t Cp, float64_t Cn)
: function()
{
    int32_t i;
    int32_t l=p->l;
    int32_t *y=p->y;

    this->prob = p;

    z = new float64_t[l];
    D = new float64_t[l];
    C = new float64_t[l];

    for (i=0; i<l; i++)
    {
        if (y[i] == 1)
            C[i] = Cp;
        else
            C[i] = Cn;
    }
}

l2_lr_fun::~l2_lr_fun()
{
    delete[] z;
    delete[] D;
    delete[] C;
}

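/* Objective value: f(w) = 0.5*w'w + sum_i C[i]*log(1 + exp(-y[i]*w'x[i])).
 * The branch on the sign of yz evaluates the logistic loss in a numerically
 * stable way; the products w'x[i] are cached in z for the subsequent grad()
 * call.
 */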
float64_t l2_lr_fun::fun(float64_t *w)
{
    int32_t i;
    float64_t f=0;
    int32_t *y=prob->y;
    int32_t l=prob->l;
    int32_t n=prob->n;

    Xv(w, z);
    for(i=0;i<l;i++)
    {
        float64_t yz = y[i]*z[i];
        if (yz >= 0)
            f += C[i]*log(1 + exp(-yz));
        else
            f += C[i]*(-yz+log(1 + exp(yz)));
    }
    f = 2*f;
    for(i=0;i<n;i++)
        f += w[i]*w[i];
    f /= 2.0;

    return(f);
}

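/* Gradient: g = w + X'*(C.*(sigma(y.*z) - 1).*y), where sigma is the logistic
 * function and z holds the products w'x[i] cached by the preceding fun() call.
 * D[i] = sigma*(1-sigma) is stored for use in the Hessian-vector product Hv().
 */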
void l2_lr_fun::grad(float64_t *w, float64_t *g)
{
    int32_t i;
    int32_t *y=prob->y;
    int32_t l=prob->l;
    int32_t n=prob->n;

    for(i=0;i<l;i++)
    {
        z[i] = 1/(1 + exp(-y[i]*z[i]));
        D[i] = z[i]*(1-z[i]);
        z[i] = C[i]*(z[i]-1)*y[i];
    }
    XTv(z, g);

    for(i=0;i<n;i++)
        g[i] = w[i] + g[i];
}

int32_t l2_lr_fun::get_nr_variable(void)
{
    return prob->n;
}

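/* Hessian-vector product: Hs = s + X'*(C.*D.*(X*s)), i.e. the Hessian of the
 * regularized logistic loss applied to s without ever forming the Hessian
 * matrix explicitly.
 */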
void l2_lr_fun::Hv(float64_t *s, float64_t *Hs)
{
    int32_t i;
    int32_t l=prob->l;
    int32_t n=prob->n;
    float64_t *wa = new float64_t[l];

    Xv(s, wa);
    for(i=0;i<l;i++)
        wa[i] = C[i]*D[i]*wa[i];

    XTv(wa, Hs);
    for(i=0;i<n;i++)
        Hs[i] = s[i] + Hs[i];
    delete[] wa;
}

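/* Xv computes X*v and XTv computes X'*v through the feature interface of the
 * problem; when use_bias is set, the last entry of the weight vector serves
 * as the bias term and the corresponding implicit feature value is 1.
 */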
void l2_lr_fun::Xv(float64_t *v, float64_t *res_Xv)
{
    int32_t l=prob->l;
    int32_t n=prob->n;

    if (prob->use_bias)
        n--;

    for (int32_t i=0;i<l;i++)
    {
        res_Xv[i]=prob->x->dense_dot(i, v, n);

        if (prob->use_bias)
            res_Xv[i]+=v[n];
    }
}

void l2_lr_fun::XTv(float64_t *v, float64_t *res_XTv)
{
    int32_t l=prob->l;
    int32_t n=prob->n;

    if (prob->use_bias)
        n--;

    memset(res_XTv, 0, sizeof(float64_t)*prob->n);

    for (int32_t i=0;i<l;i++)
    {
        prob->x->add_to_dense_vec(v[i], i, res_XTv, n);

        if (prob->use_bias)
            res_XTv[n]+=v[i];
    }
}

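/* Function object for the L2-regularized L2-loss (squared hinge) SVM, again
 * implementing the fun()/grad()/Hv() interface for the solver in Tron.h.
 * I and sizeI describe the active set of examples with y[i]*w'x[i] < 1, the
 * only examples that contribute to the gradient and generalized Hessian.
 */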
l2loss_svm_fun::l2loss_svm_fun(const problem *p, float64_t Cp, float64_t Cn)
: function()
{
    int32_t i;
    int32_t l=p->l;
    int32_t *y=p->y;

    this->prob = p;

    z = new float64_t[l];
    D = new float64_t[l];
    C = new float64_t[l];
    I = new int32_t[l];

    for (i=0; i<l; i++)
    {
        if (y[i] == 1)
            C[i] = Cp;
        else
            C[i] = Cn;
    }
}

l2loss_svm_fun::~l2loss_svm_fun()
{
    delete[] z;
    delete[] D;
    delete[] C;
    delete[] I;
}

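/* Objective value: f(w) = 0.5*w'w + sum_i C[i]*max(0, 1 - y[i]*w'x[i])^2.
 * The margins y[i]*w'x[i] are cached in z for the subsequent grad() call.
 */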
float64_t l2loss_svm_fun::fun(float64_t *w)
{
    int32_t i;
    float64_t f=0;
    int32_t *y=prob->y;
    int32_t l=prob->l;
    int32_t n=prob->n;

    Xv(w, z);
    for(i=0;i<l;i++)
    {
        z[i] = y[i]*z[i];
        float64_t d = z[i]-1;
        if (d < 0)
            f += C[i]*d*d;
    }
    f = 2*f;
    for(i=0;i<n;i++)
        f += w[i]*w[i];
    f /= 2.0;

    return(f);
}

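/* Gradient: g = w + 2*X_I'*(C_I.*(z_I - 1).*y_I), where I is the active set
 * of margin violators (z[i] = y[i]*w'x[i] < 1, as cached by the preceding
 * fun() call). I and sizeI are rebuilt here and reused by Hv().
 */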
void l2loss_svm_fun::grad(float64_t *w, float64_t *g)
{
    int32_t i;
    int32_t *y=prob->y;
    int32_t l=prob->l;
    int32_t n=prob->n;

    sizeI = 0;
    for (i=0;i<l;i++)
        if (z[i] < 1)
        {
            z[sizeI] = C[i]*y[i]*(z[i]-1);
            I[sizeI] = i;
            sizeI++;
        }
    subXTv(z, g);

    for(i=0;i<n;i++)
        g[i] = w[i] + 2*g[i];
}

int32_t l2loss_svm_fun::get_nr_variable(void)
{
    return prob->n;
}

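/* Generalized Hessian-vector product over the active set:
 * Hs = s + 2*X_I'*(C_I.*(X_I*s)). The squared hinge loss is not twice
 * differentiable at the margin, hence the generalized Hessian.
 */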
void l2loss_svm_fun::Hv(float64_t *s, float64_t *Hs)
{
    int32_t i;
    int32_t l=prob->l;
    int32_t n=prob->n;
    float64_t *wa = new float64_t[l];

    subXv(s, wa);
    for(i=0;i<sizeI;i++)
        wa[i] = C[I[i]]*wa[i];

    subXTv(wa, Hs);
    for(i=0;i<n;i++)
        Hs[i] = s[i] + 2*Hs[i];
    delete[] wa;
}

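/* Xv computes X*v over all examples; subXv and subXTv compute X_I*v and
 * X_I'*v restricted to the active set I. As above, the last weight component
 * acts as the bias term when use_bias is set.
 */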
void l2loss_svm_fun::Xv(float64_t *v, float64_t *res_Xv)
{
    int32_t l=prob->l;
    int32_t n=prob->n;

    if (prob->use_bias)
        n--;

    for (int32_t i=0;i<l;i++)
    {
        res_Xv[i]=prob->x->dense_dot(i, v, n);

        if (prob->use_bias)
            res_Xv[i]+=v[n];
    }
}

void l2loss_svm_fun::subXv(float64_t *v, float64_t *res_Xv)
{
    int32_t n=prob->n;

    if (prob->use_bias)
        n--;

    for (int32_t i=0;i<sizeI;i++)
    {
        res_Xv[i]=prob->x->dense_dot(I[i], v, n);

        if (prob->use_bias)
            res_Xv[i]+=v[n];
    }
}

void l2loss_svm_fun::subXTv(float64_t *v, float64_t *XTv)
{
    int32_t n=prob->n;

    if (prob->use_bias)
        n--;

    memset(XTv, 0, sizeof(float64_t)*prob->n);
    for (int32_t i=0;i<sizeI;i++)
    {
        prob->x->add_to_dense_vec(v[i], I[i], XTv, n);

        if (prob->use_bias)
            XTv[n]+=v[i];
    }
}

#endif //HAVE_LAPACK
#endif // DOXYGEN_SHOULD_SKIP_THIS
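
For orientation, the sketch below shows how one of these function objects is typically driven by the trust-region Newton solver declared in Tron.h: the solver repeatedly calls fun(), grad() and Hv() until convergence. This is a minimal illustration only; the exact CTron constructor and tron() signatures, and the hypothetical train_l2r_lr() helper, are assumptions here and should be checked against SVM_linear.h and Tron.h.

// Minimal usage sketch (not part of SVM_linear.cpp); solver signatures assumed.
#include "classifier/svm/SVM_linear.h"
#include "classifier/svm/Tron.h"

using namespace shogun;

void train_l2r_lr(problem& prob, float64_t Cp, float64_t Cn, float64_t eps)
{
    // prob.l examples, prob.n variables (including the bias column when
    // use_bias is set), prob.y labels in {+1,-1}, prob.x the feature matrix.
    float64_t* w = new float64_t[prob.n]();   // zero-initialized start point

    l2_lr_fun fun_obj(&prob, Cp, Cn);   // objective/gradient/Hessian oracle
    CTron tron_obj(&fun_obj, eps);      // assumed constructor: (function*, eps)
    tron_obj.tron(w);                   // assumed solver entry point

    // ... w now holds the trained weight (and bias) vector ...
    delete[] w;
}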
