SHOGUN  v1.1.0
 All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Pages
CombinedKernel.cpp
Go to the documentation of this file.
1 /*
2  * This program is free software; you can redistribute it and/or modify
3  * it under the terms of the GNU General Public License as published by
4  * the Free Software Foundation; either version 3 of the License, or
5  * (at your option) any later version.
6  *
7  * Written (W) 1999-2009 Soeren Sonnenburg
8  * Written (W) 1999-2008 Gunnar Raetsch
9  * Copyright (C) 1999-2009 Fraunhofer Institute FIRST and Max-Planck-Society
10  */
11 
12 #include <shogun/lib/common.h>
13 #include <shogun/io/SGIO.h>
14 #include <shogun/lib/Signal.h>
15 #include <shogun/base/Parallel.h>
16 
17 #include <shogun/kernel/Kernel.h>
21 
22 #include <string.h>
23 
24 #ifndef WIN32
25 #include <pthread.h>
26 #endif
27 
28 using namespace shogun;
29 
30 #ifndef DOXYGEN_SHOULD_SKIP_THIS
// Work descriptor handed to the pthread helper routines below.  Each worker
// evaluates a single subkernel over the half-open index range [start, end)
// and accumulates weighted scores into the shared `result` array; ranges are
// disjoint, so no locking is required on `result`.
31 struct S_THREAD_PARAM
32 {
33  CKernel* kernel; // subkernel this worker evaluates
34  float64_t* result; // shared output buffer, indexed by i in [start, end)
35  int32_t* vec_idx; // rhs example indices to score
36  int32_t start; // first index (inclusive)
37  int32_t end; // one past the last index (exclusive)
39  float64_t* weights; // SV weights (read only by compute_kernel_helper)
40  int32_t* IDX; // SV indices (read only by compute_kernel_helper)
41  int32_t num_suppvec; // number of support vectors
42 };
43 #endif // DOXYGEN_SHOULD_SKIP_THIS
44 
// Construct a combined kernel with cache size `size`; `asw` selects whether
// subkernel weights are reported/consumed in "appended" form (stored in
// append_subkernel_weights and used by the weight accessors below).
45 CCombinedKernel::CCombinedKernel(int32_t size, bool asw)
46 : CKernel(size), append_subkernel_weights(asw)
47 {
48  init();
49 
// NOTE(review): listing line 50 is missing from this extract; upstream it
// guards the message below with `if (append_subkernel_weights)` -- confirm
// against the full source before relying on unconditional logging here.
51  SG_INFO( "(subkernel weights are appended)\n") ;
52 
53  SG_INFO("Combined kernel created (%p)\n", this) ;
54 }
55 
57 {
60 
61  cleanup();
63 
64  SG_INFO("Combined kernel deleted (%p).\n", this);
65 }
66 
// Initialize the combined kernel on a pair of CombinedFeatures: walks the
// feature lists of `l` and `r` in lockstep with the kernel list and calls
// init() on each non-custom subkernel with its matching feature pair.
// Custom kernels carry their own matrix and only get their dimensions
// validated.  Returns true iff every subkernel initialized successfully.
67 bool CCombinedKernel::init(CFeatures* l, CFeatures* r)
68 {
69  CKernel::init(l,r);
// NOTE(review): listing lines 70-73 are missing from this extract
// (presumably asserts that l and r are combined-feature objects -- confirm).
74 
75  CFeatures* lf=NULL;
76  CFeatures* rf=NULL;
77  CKernel* k=NULL;
78 
79  bool result=true;
80 
81  CListElement* lfc = NULL;
82  CListElement* rfc = NULL;
83  lf=((CCombinedFeatures*) l)->get_first_feature_obj(lfc);
84  rf=((CCombinedFeatures*) r)->get_first_feature_obj(rfc);
85  CListElement* current = NULL;
86  k=get_first_kernel(current);
87 
88  while ( result && k )
89  {
90  // skip over features - the custom kernel does not need any
91  if (k->get_kernel_type() != K_CUSTOM)
92  {
// A non-custom subkernel consumes one feature object from each side;
// running out of features here means the lists are mismatched.
93  if (!lf || !rf)
94  {
95  SG_UNREF(lf);
96  SG_UNREF(rf);
97  SG_UNREF(k);
98  SG_ERROR( "CombinedKernel: Number of features/kernels does not match - bailing out\n");
99  }
100 
101  SG_DEBUG( "Initializing 0x%p - \"%s\"\n", this, k->get_name());
102  result=k->init(lf,rf);
// Release the references taken by get_first/next_feature_obj before advancing.
103  SG_UNREF(lf);
104  SG_UNREF(rf);
105 
106  lf=((CCombinedFeatures*) l)->get_next_feature_obj(lfc) ;
107  rf=((CCombinedFeatures*) r)->get_next_feature_obj(rfc) ;
108  }
109  else
110  {
111  SG_DEBUG( "Initializing 0x%p - \"%s\" (skipping init, this is a CUSTOM kernel)\n", this, k->get_name());
// A custom kernel must already hold a matrix whose row/column counts agree
// with this combined kernel's lhs/rhs vector counts.
112  if (!k->has_features())
113  SG_ERROR("No kernel matrix was assigned to this Custom kernel\n");
114  if (k->get_num_vec_lhs() != num_lhs)
115  SG_ERROR("Number of lhs-feature vectors (%d) not match with number of rows (%d) of custom kernel\n", num_lhs, k->get_num_vec_lhs());
116  if (k->get_num_vec_rhs() != num_rhs)
117  SG_ERROR("Number of rhs-feature vectors (%d) not match with number of cols (%d) of custom kernel\n", num_rhs, k->get_num_vec_rhs());
118  }
119 
120  SG_UNREF(k);
121  k=get_next_kernel(current) ;
122  }
123 
124  if (!result)
125  {
126  SG_INFO( "CombinedKernel: Initialising the following kernel failed\n");
127  if (k)
128  k->list_kernel();
129  else
130  SG_INFO( "<NULL>\n");
131  return false;
132  }
133 
// Leftover features or kernels on either side also mean a count mismatch.
134  if ((lf!=NULL) || (rf!=NULL) || (k!=NULL))
135  {
136  SG_UNREF(lf);
137  SG_UNREF(rf);
138  SG_UNREF(k);
139  SG_ERROR( "CombinedKernel: Number of features/kernels does not match - bailing out\n");
140  }
141 
142  init_normalizer();
143  initialized=true;
144  return true;
145 }
146 
// Body of CCombinedKernel::remove_lhs() -- the signature (listing line 147)
// and listing lines 149/162 are missing from this extract.  Detaches the
// left-hand-side features from every non-custom subkernel, then resets the
// cached lhs vector count.
148 {
150 
151  CListElement* current = NULL ;
152  CKernel* k=get_first_kernel(current);
153 
154  while (k)
155  {
// Custom kernels store a fixed matrix and never hold feature objects.
156  if (k->get_kernel_type() != K_CUSTOM)
157  k->remove_lhs();
158 
159  SG_UNREF(k);
160  k=get_next_kernel(current);
161  }
163 
164  num_lhs=0;
165 }
166 
// Body of CCombinedKernel::remove_rhs() -- the signature (listing line 167)
// and listing line 179 are missing from this extract.  Detaches the
// right-hand-side features from every non-custom subkernel, then resets the
// cached rhs vector count.
168 {
169  CListElement* current = NULL ;
170  CKernel* k=get_first_kernel(current);
171 
172  while (k)
173  {
// Custom kernels keep their own matrix; nothing to detach.
174  if (k->get_kernel_type() != K_CUSTOM)
175  k->remove_rhs();
176  SG_UNREF(k);
177  k=get_next_kernel(current);
178  }
180 
181  num_rhs=0;
182 }
183 
// Body of CCombinedKernel::remove_lhs_and_rhs() -- the signature (listing
// line 184) and listing lines 186/199 are missing from this extract.
// Detaches both feature sides from every non-custom subkernel and zeroes
// both cached vector counts.
185 {
187 
188  CListElement* current = NULL ;
189  CKernel* k=get_first_kernel(current);
190 
191  while (k)
192  {
// Custom kernels keep their own matrix; nothing to detach.
193  if (k->get_kernel_type() != K_CUSTOM)
194  k->remove_lhs_and_rhs();
195  SG_UNREF(k);
196  k=get_next_kernel(current);
197  }
198 
200 
201  num_lhs=0;
202  num_rhs=0;
203 }
204 
// Body of CCombinedKernel::cleanup() -- the signature (listing line 205)
// and listing lines 217/219 are missing from this extract.  Calls cleanup()
// on every subkernel (custom ones included, unlike the remove_* methods
// above) and zeroes both cached vector counts.
206 {
207  CListElement* current = NULL ;
208  CKernel* k=get_first_kernel(current);
209 
210  while (k)
211  {
212  k->cleanup();
213  SG_UNREF(k);
214  k=get_next_kernel(current);
215  }
216 
218 
220 
221  num_lhs=0;
222  num_rhs=0;
223 }
224 
// Body of CCombinedKernel::list_kernels() -- the signature (listing line
// 225) is missing from this extract.  Logs this combined kernel's own
// description followed by every subkernel's, bracketed by BEGIN/END markers.
226 {
227  CKernel* k;
228 
229  SG_INFO( "BEGIN COMBINED KERNEL LIST - ");
230  this->list_kernel();
231 
232  CListElement* current = NULL ;
233  k=get_first_kernel(current);
234  while (k)
235  {
236  k->list_kernel();
237  SG_UNREF(k);
238  k=get_next_kernel(current);
239  }
240  SG_INFO( "END COMBINED KERNEL LIST - ");
241 }
242 
243 float64_t CCombinedKernel::compute(int32_t x, int32_t y)
244 {
245  float64_t result=0;
246  CListElement* current = NULL ;
247  CKernel* k=get_first_kernel(current);
248  while (k)
249  {
250  if (k->get_combined_kernel_weight()!=0)
251  result += k->get_combined_kernel_weight() * k->kernel(x,y);
252  SG_UNREF(k);
253  k=get_next_kernel(current);
254  }
255 
256  return result;
257 }
258 
// CCombinedKernel::init_optimization(count, IDX, weights) -- the opening
// signature line (listing 259) is missing from this extract.  Forwards the
// support-vector set to every LINADD-capable subkernel; if any subkernel is
// not optimizable, or its init_optimization() fails, the SV indices/weights
// are additionally copied into sv_idx/sv_weight so compute_optimized() can
// fall back to explicit kernel sums for those subkernels.  Always returns
// true in the visible body.
260  int32_t count, int32_t *IDX, float64_t *weights)
261 {
262  SG_DEBUG( "initializing CCombinedKernel optimization\n");
263 
// NOTE(review): listing line 264 is missing from this extract; without a
// delete_optimization() there, a second call would leak the previous
// sv_idx/sv_weight allocations -- confirm against the full source.
265 
266  CListElement* current=NULL;
267  CKernel *k=get_first_kernel(current);
268  bool have_non_optimizable=false;
269 
270  while(k)
271  {
272  bool ret=true;
273 
274  if (k && k->has_property(KP_LINADD))
275  ret=k->init_optimization(count, IDX, weights);
276  else
277  {
// NOTE(review): %X with a pointer argument is undefined behavior on LP64
// targets; %p would be the portable conversion here and below.
278  SG_WARNING("non-optimizable kernel 0x%X in kernel-list\n", k);
279  have_non_optimizable=true;
280  }
281 
282  if (!ret)
283  {
284  have_non_optimizable=true;
285  SG_WARNING("init_optimization of kernel 0x%X failed\n", k);
286  }
287 
288  SG_UNREF(k);
289  k=get_next_kernel(current);
290  }
291 
292  if (have_non_optimizable)
293  {
294  SG_WARNING( "some kernels in the kernel-list are not optimized\n");
295 
// Keep a private copy of the SV set for the non-optimized fallback path
// in compute_optimized(); freed again by delete_optimization().
296  sv_idx=SG_MALLOC(int32_t, count);
297  sv_weight=SG_MALLOC(float64_t, count);
298  sv_count=count;
299  for (int32_t i=0; i<count; i++)
300  {
301  sv_idx[i]=IDX[i];
302  sv_weight[i]=weights[i];
303  }
304  }
305  set_is_initialized(true);
306 
307  return true;
308 }
309 
// Body of CCombinedKernel::delete_optimization() -- the signature (listing
// line 310) is missing from this extract.  Tears down the LINADD state of
// every optimizable subkernel and releases the fallback SV copy made by
// init_optimization().
311 {
312  CListElement* current = NULL ;
313  CKernel* k = get_first_kernel(current);
314 
315  while(k)
316  {
317  if (k->has_property(KP_LINADD))
318  k->delete_optimization();
319 
320  SG_UNREF(k);
321  k = get_next_kernel(current);
322  }
323 
324  SG_FREE(sv_idx);
325  sv_idx = NULL;
326 
// NOTE(review): listing line 327 is missing from this extract; upstream it
// frees sv_weight (SG_FREE) before the NULL assignment below -- confirm.
328  sv_weight = NULL;
329 
330  sv_count = 0;
331  set_is_initialized(false);
332 
333  return true;
334 }
335 
// CCombinedKernel::compute_batch(...) -- the opening signature line
// (listing 336) is missing from this extract.  Scores `num_vec` rhs
// examples (indices in vec_idx) against the SV set (IDX/weights),
// accumulating into `result`: batch-capable subkernels use their own
// compute_batch() scaled by their combined weight, all others go through
// emulate_compute_batch().
337  int32_t num_vec, int32_t* vec_idx, float64_t* result, int32_t num_suppvec,
338  int32_t* IDX, float64_t* weights, float64_t factor)
339 {
340  ASSERT(num_vec<=get_num_vec_rhs())
341  ASSERT(num_vec>0);
342  ASSERT(vec_idx);
343  ASSERT(result);
344 
345  //we have to do the optimization business ourselves but lets
346  //make sure we start cleanly
// NOTE(review): listing line 347 is missing from this extract (the "start
// cleanly" step the comment above refers to) -- confirm upstream.
348 
349  CListElement* current = NULL ;
350  CKernel * k = get_first_kernel(current) ;
351 
352  while(k)
353  {
354  if (k && k->has_property(KP_BATCHEVALUATION))
355  {
// Zero-weight subkernels contribute nothing and are skipped entirely.
356  if (k->get_combined_kernel_weight()!=0)
357  k->compute_batch(num_vec, vec_idx, result, num_suppvec, IDX, weights, k->get_combined_kernel_weight());
358  }
359  else
360  emulate_compute_batch(k, num_vec, vec_idx, result, num_suppvec, IDX, weights);
361 
362  SG_UNREF(k);
363  k = get_next_kernel(current);
364  }
365 
366  //clean up
// NOTE(review): listing line 367 (the clean-up step) is missing from this
// extract, and the `factor` parameter is unused in the visible body.
368 }
369 
// Body of the static pthread entry point
// CCombinedKernel::compute_optimized_kernel_helper(void* p) -- the
// signature (listing line 370) is missing from this extract.  For each i in
// [start, end) adds combined_weight * compute_optimized(vec_idx[i]) into
// the shared result buffer; workers own disjoint ranges, so no locking is
// needed.  Always returns NULL.
371 {
372  S_THREAD_PARAM* params= (S_THREAD_PARAM*) p;
373  int32_t* vec_idx=params->vec_idx;
374  CKernel* k=params->kernel;
375  float64_t* result=params->result;
376 
377  for (int32_t i=params->start; i<params->end; i++)
378  result[i] += k->get_combined_kernel_weight()*k->compute_optimized(vec_idx[i]);
379 
380  return NULL;
381 }
382 
// Body of the static pthread entry point
// CCombinedKernel::compute_kernel_helper(void* p) -- the signature (listing
// line 383) is missing from this extract.  Fallback for subkernels without
// LINADD support: for each i in [start, end) computes the explicit SV sum
// sum_j weights[j] * k(IDX[j], vec_idx[i]) and adds it, scaled by the
// subkernel's combined weight, into the shared result buffer.  Always
// returns NULL.
384 {
385  S_THREAD_PARAM* params= (S_THREAD_PARAM*) p;
386  int32_t* vec_idx=params->vec_idx;
387  CKernel* k=params->kernel;
388  float64_t* result=params->result;
389  float64_t* weights=params->weights;
390  int32_t* IDX=params->IDX;
391  int32_t num_suppvec=params->num_suppvec;
392 
393  for (int32_t i=params->start; i<params->end; i++)
394  {
395  float64_t sub_result=0;
396  for (int32_t j=0; j<num_suppvec; j++)
397  sub_result += weights[j] * k->kernel(IDX[j], vec_idx[i]);
398 
399  result[i] += k->get_combined_kernel_weight()*sub_result;
400  }
401 
402  return NULL;
403 }
404 
// CCombinedKernel::emulate_compute_batch(...) -- the opening signature line
// (listing 405) is missing from this extract.  Emulates batch evaluation
// for a single subkernel `k` that lacks KP_BATCHEVALUATION: if `k` supports
// LINADD, it is temporarily init_optimization()'d and scored via
// compute_optimized_kernel_helper; otherwise compute_kernel_helper does the
// explicit SV sums.  Both paths parallelize with pthreads when
// parallel->get_num_threads() > 1 (non-WIN32 only; on WIN32 the `else`
// branch is compiled out, so only the single-threaded path exists).
406  CKernel* k, int32_t num_vec, int32_t* vec_idx, float64_t* result,
407  int32_t num_suppvec, int32_t* IDX, float64_t* weights)
408 {
409  ASSERT(k);
410  ASSERT(result);
411 
412  if (k->has_property(KP_LINADD))
413  {
// Zero-weight subkernels would contribute nothing; skip all the work.
414  if (k->get_combined_kernel_weight()!=0)
415  {
416  k->init_optimization(num_suppvec, IDX, weights);
417 
418  int32_t num_threads=parallel->get_num_threads();
419  ASSERT(num_threads>0);
420 
421  if (num_threads < 2)
422  {
423  S_THREAD_PARAM params;
424  params.kernel=k;
425  params.result=result;
426  params.start=0;
427  params.end=num_vec;
428  params.vec_idx = vec_idx;
429  compute_optimized_kernel_helper((void*) &params);
430  }
431 #ifndef WIN32
432  else
433  {
// num_threads-1 worker threads plus the calling thread, which takes the
// last (possibly larger) chunk; `params` has one slot per participant.
434  pthread_t* threads = SG_MALLOC(pthread_t, num_threads-1);
435  S_THREAD_PARAM* params = SG_MALLOC(S_THREAD_PARAM, num_threads);
436  int32_t step= num_vec/num_threads;
437 
438  int32_t t;
439 
440  for (t=0; t<num_threads-1; t++)
441  {
442  params[t].kernel = k;
443  params[t].result = result;
444  params[t].start = t*step;
445  params[t].end = (t+1)*step;
446  params[t].vec_idx = vec_idx;
447  pthread_create(&threads[t], NULL, CCombinedKernel::compute_optimized_kernel_helper, (void*)&params[t]);
448  }
449 
// The calling thread handles the tail chunk [t*step, num_vec), absorbing
// the remainder of the integer division.
450  params[t].kernel = k;
451  params[t].result = result;
452  params[t].start = t*step;
453  params[t].end = num_vec;
454  params[t].vec_idx = vec_idx;
455  compute_optimized_kernel_helper((void*) &params[t]);
456 
457  for (t=0; t<num_threads-1; t++)
458  pthread_join(threads[t], NULL);
459 
460  SG_FREE(params);
461  SG_FREE(threads);
462  }
463 #endif
464 
// Undo the temporary optimization state set up above.
465  k->delete_optimization();
466  }
467  }
468  else
469  {
470  ASSERT(IDX!=NULL || num_suppvec==0);
471  ASSERT(weights!=NULL || num_suppvec==0);
472 
473  if (k->get_combined_kernel_weight()!=0)
474  { // compute the usual way for any non-optimized kernel
475  int32_t num_threads=parallel->get_num_threads();
476  ASSERT(num_threads>0);
477 
478  if (num_threads < 2)
479  {
480  S_THREAD_PARAM params;
481  params.kernel=k;
482  params.result=result;
483  params.start=0;
484  params.end=num_vec;
485  params.vec_idx = vec_idx;
486  params.IDX = IDX;
487  params.weights = weights;
488  params.num_suppvec = num_suppvec;
489  compute_kernel_helper((void*) &params);
490  }
491 #ifndef WIN32
492  else
493  {
// Same split as above: num_threads-1 workers plus the calling thread.
494  pthread_t* threads = SG_MALLOC(pthread_t, num_threads-1);
495  S_THREAD_PARAM* params = SG_MALLOC(S_THREAD_PARAM, num_threads);
496  int32_t step= num_vec/num_threads;
497 
498  int32_t t;
499 
500  for (t=0; t<num_threads-1; t++)
501  {
502  params[t].kernel = k;
503  params[t].result = result;
504  params[t].start = t*step;
505  params[t].end = (t+1)*step;
506  params[t].vec_idx = vec_idx;
507  params[t].IDX = IDX;
508  params[t].weights = weights;
509  params[t].num_suppvec = num_suppvec;
510  pthread_create(&threads[t], NULL, CCombinedKernel::compute_kernel_helper, (void*)&params[t]);
511  }
512 
// Calling thread takes the tail chunk including the division remainder.
513  params[t].kernel = k;
514  params[t].result = result;
515  params[t].start = t*step;
516  params[t].end = num_vec;
517  params[t].vec_idx = vec_idx;
518  params[t].IDX = IDX;
519  params[t].weights = weights;
520  params[t].num_suppvec = num_suppvec;
521  compute_kernel_helper(&params[t]);
522 
523  for (t=0; t<num_threads-1; t++)
524  pthread_join(threads[t], NULL);
525 
526  SG_FREE(params);
527  SG_FREE(threads);
528  }
529 #endif
530  }
531  }
532 }
533 
// Body of CCombinedKernel::compute_optimized(idx) -- the signature (listing
// line 534) is missing from this extract.  Scores one rhs example against
// the SV set: initialized LINADD subkernels use their own
// compute_optimized(); everything else falls back to the explicit SV sum
// using the sv_idx/sv_weight copy made by init_optimization().
535 {
536  if (!get_is_initialized())
537  {
538  SG_ERROR("CCombinedKernel optimization not initialized\n");
539  return 0;
540  }
541 
542  float64_t result=0;
543 
544  CListElement* current=NULL;
545  CKernel *k=get_first_kernel(current);
546  while (k)
547  {
548  if (k->has_property(KP_LINADD) &&
549  k->get_is_initialized())
550  {
551  if (k->get_combined_kernel_weight()!=0)
552  {
// NOTE(review): listing line 554 -- the right-hand side of this addition --
// is missing from this extract; upstream it is the subkernel's weighted
// compute_optimized(idx) value.  Confirm against the full source.
553  result +=
555  }
556  }
557  else
558  {
559  ASSERT(sv_idx!=NULL || sv_count==0);
560  ASSERT(sv_weight!=NULL || sv_count==0);
561 
562  if (k->get_combined_kernel_weight()!=0)
563  { // compute the usual way for any non-optimized kernel
564  float64_t sub_result=0;
565  for (int32_t j=0; j<sv_count; j++)
566  sub_result += sv_weight[j] * k->kernel(sv_idx[j], idx);
567 
568  result += k->get_combined_kernel_weight()*sub_result;
569  }
570  }
571 
572  SG_UNREF(k);
573  k=get_next_kernel(current);
574  }
575 
576  return result;
577 }
578 
579 void CCombinedKernel::add_to_normal(int32_t idx, float64_t weight)
580 {
581  CListElement* current = NULL ;
582  CKernel* k = get_first_kernel(current);
583 
584  while(k)
585  {
586  k->add_to_normal(idx, weight);
587  SG_UNREF(k);
588  k = get_next_kernel(current);
589  }
590  set_is_initialized(true) ;
591 }
592 
// Body of CCombinedKernel::clear_normal() -- the signature (listing line
// 593) is missing from this extract.  Resets every subkernel's normal
// vector.
594 {
595  CListElement* current = NULL ;
596  CKernel* k = get_first_kernel(current);
597 
598  while(k)
599  {
600  k->clear_normal() ;
601  SG_UNREF(k);
602  k = get_next_kernel(current);
603  }
// NOTE(review): marking the kernel initialized after *clearing* the normal
// looks surprising (it mirrors add_to_normal above) -- confirm the intent
// upstream before changing it.
604  set_is_initialized(true) ;
605 }
606 
// CCombinedKernel::compute_by_subkernel(idx, subkernel_contrib) -- listing
// lines 607 (signature start) and 610 (the branch condition, presumably
// `if (append_subkernel_weights)` to match the other accessors -- confirm)
// are missing from this extract.  Fills per-subkernel contributions for
// example `idx`: in the first branch each subkernel occupies `num`
// consecutive slots, in the second branch exactly one slot per subkernel.
608  int32_t idx, float64_t * subkernel_contrib)
609 {
611  {
612  int32_t i=0 ;
613  CListElement* current = NULL ;
614  CKernel* k = get_first_kernel(current);
615  while(k)
616  {
617  int32_t num = -1 ;
618  k->get_subkernel_weights(num);
// Multi-weight subkernels report their own per-subkernel scores; a
// single-weight subkernel contributes one weighted optimized score.
619  if (num>1)
620  k->compute_by_subkernel(idx, &subkernel_contrib[i]) ;
621  else
622  subkernel_contrib[i] += k->get_combined_kernel_weight() * k->compute_optimized(idx) ;
623 
624  SG_UNREF(k);
625  k = get_next_kernel(current);
626  i += num ;
627  }
628  }
629  else
630  {
631  int32_t i=0 ;
632  CListElement* current = NULL ;
633  CKernel* k = get_first_kernel(current);
634  while(k)
635  {
636  if (k->get_combined_kernel_weight()!=0)
637  subkernel_contrib[i] += k->get_combined_kernel_weight() * k->compute_optimized(idx) ;
638 
639  SG_UNREF(k);
640  k = get_next_kernel(current);
641  i++ ;
642  }
643  }
644 }
645 
// CCombinedKernel::get_subkernel_weights(num_weights) -- several listing
// lines are missing from this extract: 646 (signature), 649-650 (presumably
// the (re)allocation of subkernel_weights_buffer to num_weights entries),
// 652 (the branch condition, presumably `if (append_subkernel_weights)`)
// and 677 (the per-kernel copy in the else-branch).  Flattens all subkernel
// weights into subkernel_weights_buffer and returns that shared buffer.
647 {
648  num_weights = get_num_subkernels() ;
651 
653  {
654  int32_t i=0 ;
655  CListElement* current = NULL ;
656  CKernel* k = get_first_kernel(current);
657  while(k)
658  {
// Copy each subkernel's own weight block into the flat buffer.
659  int32_t num = -1 ;
660  const float64_t *w = k->get_subkernel_weights(num);
661  ASSERT(num==k->get_num_subkernels());
662  for (int32_t j=0; j<num; j++)
663  subkernel_weights_buffer[i+j]=w[j] ;
664 
665  SG_UNREF(k);
666  k = get_next_kernel(current);
667  i += num ;
668  }
669  }
670  else
671  {
672  int32_t i=0 ;
673  CListElement* current = NULL ;
674  CKernel* k = get_first_kernel(current);
675  while(k)
676  {
678 
679  SG_UNREF(k);
680  k = get_next_kernel(current);
681  i++ ;
682  }
683  }
684 
685  return subkernel_weights_buffer ;
686 }
687 
// SGVector-returning overload of get_subkernel_weights() -- the signature
// (listing line 688) is missing from this extract.  Wraps the flat buffer
// returned by get_subkernel_weights(num) in an SGVector.
689 {
690  int32_t num=0;
691  const float64_t* w=get_subkernel_weights(num);
692 
// NOTE(review): const is cast away here, so callers share (and could
// mutate) the internal subkernel_weights_buffer.
693  return SGVector<float64_t>((float64_t*) w, num);
694 }
695 
// CCombinedKernel::set_subkernel_weights(weights) -- listing lines 696
// (signature) and 698 (the branch condition, presumably
// `if (append_subkernel_weights)` -- confirm) are missing from this
// extract.  Distributes the flat weight vector back onto the subkernels:
// blocks of per-subkernel weights in the first branch, otherwise one
// combined weight per subkernel.
697 {
699  {
700  int32_t i=0 ;
701  CListElement* current = NULL ;
702  CKernel* k = get_first_kernel(current);
703  while(k)
704  {
705  int32_t num = k->get_num_subkernels() ;
706  ASSERT(i<weights.vlen);
// Hand each subkernel a view onto its slice of the flat vector.
707  k->set_subkernel_weights(SGVector<float64_t>(&weights.vector[i],num));
708 
709  SG_UNREF(k);
710  k = get_next_kernel(current);
711  i += num ;
712  }
713  }
714  else
715  {
716  int32_t i=0 ;
717  CListElement* current = NULL ;
718  CKernel* k = get_first_kernel(current);
719  while(k)
720  {
721  ASSERT(i<weights.vlen);
722  k->set_combined_kernel_weight(weights.vector[i]);
723 
724  SG_UNREF(k);
725  k = get_next_kernel(current);
726  i++ ;
727  }
728  }
729 }
730 
// Body of CCombinedKernel::set_optimization_type(t) -- the signature
// (listing line 731) and listing line 743 are missing from this extract.
// Propagates the optimization type to every subkernel; the missing line 743
// presumably forwards to the CKernel base implementation -- confirm.
732 {
733  CKernel* k = get_first_kernel();
734 
735  while(k)
736  {
737  k->set_optimization_type(t);
738 
739  SG_UNREF(k);
740  k = get_next_kernel();
741  }
742 
744 }
745 
// Body of CCombinedKernel::precompute_subkernels() -- the signature
// (listing line 746) and listing lines 763/765 around the list swap are
// missing from this extract (presumably reference-count bookkeeping on the
// old/new list -- confirm).  Replaces the kernel list with CCustomKernel
// copies of each subkernel, i.e. materializes every subkernel as an
// explicit kernel matrix.  Returns false when the list is empty.
747 {
748  CKernel* k = get_first_kernel();
749 
750  if (!k)
751  return false;
752 
753  CList* new_kernel_list = new CList(true);
754 
755  while(k)
756  {
// Materialize the current subkernel into a precomputed custom kernel.
757  new_kernel_list->append_element(new CCustomKernel(k));
758 
759  SG_UNREF(k);
760  k = get_next_kernel();
761  }
762 
764  kernel_list=new_kernel_list;
766 
767  return true;
768 }
769 
// One-time member initialization shared by the constructors: zero the
// fallback SV state, create the (owning) subkernel list, and register
// members with m_parameters for serialization.  Several listing lines are
// missing from this extract (775, 778, 780-782 interiors, 789), including
// the opening of the append_subkernel_weights registration call.
770 void CCombinedKernel::init()
771 {
772  sv_count=0;
773  sv_idx=NULL;
774  sv_weight=NULL;
776  initialized=false;
777 
779  kernel_list=new CList(true);
781 
782 
// Register members for serialization / model persistence.
783  m_parameters->add((CSGObject**) &kernel_list, "kernel_list",
784  "List of kernels.");
785  m_parameters->add_vector(&sv_idx, &sv_count, "sv_idx",
786  "Support vector index.");
787  m_parameters->add_vector(&sv_weight, &sv_count, "sv_weight",
788  "Support vector weights.");
// NOTE(review): the m_parameters->add(...) call that opens this
// registration (listing line 789) is missing from this extract; the two
// lines below are its remaining arguments.
790  "append_subkernel_weights",
791  "If subkernel weights are appended.");
792  m_parameters->add(&initialized, "initialized",
793  "Whether kernel is ready to be used.");
794 }
795 

SHOGUN Machine Learning Toolbox - Documentation