// ECOC PAK v0.9 -- fn_ecoc_one.hpp (source listing)
00001 // Copyright (C) 2011 the authors listed below
00002 // http://ecocpak.sourceforge.net
00003 //
00004 // Authors:
00005 // - Dimitrios Bouzas (bouzas at ieee dot org)
00006 // - Nikolaos Arvanitopoulos (niarvani at ieee dot org)
00007 // - Anastasios Tefas (tefas at aiia dot csd dot auth dot gr)
00008 //
00009 // This file is part of the ECOC PAK C++ library. It is
00010 // provided without any warranty of fitness for any purpose.
00011 //
00012 // You can redistribute this file and/or modify it under
00013 // the terms of the GNU Lesser General Public License (LGPL)
00014 // as published by the Free Software Foundation, either
00015 // version 3 of the License or (at your option) any later
00016 // version.
00017 // (see http://www.opensource.org/licenses for more info)
00018 
00019 
00022 
00023 
00037 void
00038 one_vs_all_ecocone
00039   (
00040   vector<ClassData>& classes_vector,
00041   const int classifiers_type,
00042   imat& coding_matrix,
00043   vector<Classifier*>& classifiers_vector
00044   )
00045   {
00046   // number of classes
00047   u32 n_classes = classes_vector.size();
00048 
00049   // allocate error correcting output codes matrix
00050   coding_matrix = -ones<imat>(n_classes, n_classes);
00051 
00052   // ================================================================ //
00053   // ||                        Training Step                       || //
00054   // ================================================================ //
00055 
00056     // auxuliary column counter
00057     u32 k = 0;
00058 
00059     // iterate through number of classes
00060     for(u32 i = 0; i < n_classes; i++)
00061       {
00062       // negative classes data matrix
00063       mat X_neg;
00064 
00065       // negative classes vector
00066       vector<ClassData*> neg_classes;
00067 
00068       // number of negative examples
00069       u32 n_neg = 0;
00070 
00071       // iterate through classes to construct X_neg
00072       for(u32 j = 0; j < n_classes; j++)
00073         {
00074         // if class is considered negative
00075         if(j != i)
00076           {
00077           // append samples of current class to the of X_neg
00078           X_neg = join_cols(X_neg, classes_vector[j].Data());
00079 
00080           // store pointer to current class to temporary vector
00081           neg_classes.push_back(&(classes_vector[j]));
00082 
00083           // increase negative samples temporary counter
00084           n_neg += classes_vector[j].Samples();
00085           }
00086 
00087         }
00088 
00089       // create and store specific classifier
00090       switch(classifiers_type)
00091         {
00092         // Nearest Class Centroid Classifier
00093         case NCC:
00094           {
00095           Classifier_ncc* tmp = new Classifier_ncc
00096                                       (
00097                                       classes_vector[i].Data(),
00098                                       X_neg
00099                                       );
00100 
00101           // update classifier's possitive class pointer
00102           tmp->pos.push_back(&(classes_vector[i]));
00103 
00104           // update classifier's negative classes
00105           tmp->neg = neg_classes;
00106 
00107           // update classifier's number of possitive samples
00108           tmp->n_pos = classes_vector[i].Samples();
00109 
00110           // update classifier's number of negative samples
00111           tmp->n_neg = n_neg;
00112 
00113           // store classifier
00114           classifiers_vector.push_back(tmp);
00115 
00116           break;
00117           }
00118 
00119         // Fisher Linear Discriminant followed by NCC
00120         case FLDA:
00121           {
00122           Classifier_flda* tmp = new Classifier_flda
00123                                        (
00124                                        classes_vector[i].Data(),
00125                                        X_neg
00126                                        );
00127 
00128           // update classifier's possitive class pointer
00129           tmp->pos.push_back(&(classes_vector[i]));
00130 
00131           // update classifier's negative classes
00132           tmp->neg = neg_classes;
00133 
00134           // update classifier's number of possitive samples
00135           tmp->n_pos = classes_vector[i].Samples();
00136 
00137           // update classifier's number of negative samples
00138           tmp->n_neg = n_neg;
00139 
00140           // store classifier
00141           classifiers_vector.push_back(tmp);
00142 
00143           break;
00144           }
00145 
00146         // Support Vector Machine Classifier
00147         case SVM:
00148           {
00149           Classifier_svm* tmp = new Classifier_svm
00150                                       (
00151                                       classes_vector[i].Data(),
00152                                       X_neg
00153                                       );
00154 
00155           // update classifier's possitive class pointer
00156           tmp->pos.push_back(&(classes_vector[i]));
00157 
00158           // update classifier's negative classes
00159           tmp->neg = neg_classes;
00160 
00161           // update classifier's number of possitive samples
00162           tmp->n_pos = classes_vector[i].Samples();
00163 
00164           // update classifier's number of negative samples
00165           tmp->n_neg = n_neg;
00166 
00167           // store classifier
00168           classifiers_vector.push_back(tmp);
00169 
00170           break;
00171           }
00172 
00173         // AdaBoost Classifier
00174         case ADABOOST:
00175           {
00176           Classifier_adaBoost* tmp = new Classifier_adaBoost
00177                                            (
00178                                            classes_vector[i].Data(),
00179                                            X_neg
00180                                            );
00181 
00182           // update classifier's possitive class pointer
00183           tmp->pos.push_back(&(classes_vector[i]));
00184 
00185           // update classifier's negative classes
00186           tmp->neg = neg_classes;
00187 
00188           // update classifier's number of possitive samples
00189           tmp->n_pos = classes_vector[i].Samples();
00190 
00191           // update classifier's number of negative samples
00192           tmp->n_neg = n_neg;
00193 
00194           // store classifier
00195           classifiers_vector.push_back(tmp);
00196 
00197           break;
00198           }
00199 
00200         // Sum of Error Squares Classifier
00201         case LEAST_SQUARES:
00202           {
00203           Classifier_ls* tmp = new Classifier_ls
00204                                      (
00205                                      classes_vector[i].Data(),
00206                                      X_neg
00207                                      );
00208 
00209           // update classifier's possitive class pointer
00210           tmp->pos.push_back(&(classes_vector[i]));
00211 
00212           // update classifier's negative classes
00213           tmp->neg = neg_classes;
00214 
00215           // update classifier's number of possitive samples
00216           tmp->n_pos = classes_vector[i].Samples();
00217 
00218           // update classifier's number of negative samples
00219           tmp->n_neg = n_neg;
00220 
00221           // store classifier
00222           classifiers_vector.push_back(tmp);
00223 
00224           break;
00225           }
00226 
00227         // Custom Classifier
00228         case CUSTOM_CLASSIFIER:
00229           {
00230           Classifier_custom* tmp = new Classifier_custom
00231                                          (
00232                                          classes_vector[i].Data(),
00233                                          X_neg
00234                                          );
00235 
00236            // update classifier's possitive class pointer
00237           tmp->pos.push_back(&(classes_vector[i]));
00238 
00239           // update classifier's negative classes
00240           tmp->neg = neg_classes;
00241 
00242           // update classifier's number of possitive samples
00243           tmp->n_pos = classes_vector[i].Samples();
00244 
00245           // update classifier's number of negative samples
00246           tmp->n_neg = n_neg;
00247 
00248           // store classifier
00249           classifiers_vector.push_back(tmp);
00250 
00251           break;
00252           }
00253 
00254         default:
00255           {
00256           arma_debug_print("one_vs_all_ecocone(): Unknown classifier's option");
00257           }
00258 
00259         }
00260 
00261       // update ECOC matrix in order to create 1 vs all configuration
00262       coding_matrix(i, k) = 1;
00263 
00264       // increase ECOC matrix column counter
00265       k++;
00266       }
00267 
00268   }
00269 
00270 
00271 
00285 void
00286 one_vs_one_ecocone
00287   (
00288   vector<ClassData>& classes_vector,
00289   const int classifiers_type,
00290   imat& coding_matrix,
00291   vector<Classifier*>& classifiers_vector
00292   )
00293   {
00294   // variable to hold the number of classes
00295   u32 n_classes = classes_vector.size();
00296 
00297   // allocate error correcting output codes matrix
00298   coding_matrix = zeros<imat>
00299                     (
00300                     n_classes,
00301                     (n_classes * (n_classes - 1)) / 2
00302                     );
00303 
00304   // ================================================================ //
00305   // ||                        Training Step                       || //
00306   // ================================================================ //
00307 
00308     // auxuliary column counter
00309     u32 k = 0;
00310 
00311     // start training of (K * (K - 1) / 2) classifiers, where K = number
00312     // of classes
00313     for(u32 i = 0; i < n_classes; i++)
00314       {
00315       for(u32 j = i+1; j < n_classes; j++)
00316         {
00317         switch(classifiers_type)
00318           {
00319           // Nearest Class Centroid Classifier
00320           case NCC:
00321             {
00322             Classifier_ncc* tmp = new Classifier_ncc
00323                                         (
00324                                         classes_vector[i].Data(),
00325                                         classes_vector[j].Data()
00326                                         );
00327 
00328             // update classifier's class objects
00329             tmp->pos.push_back(&(classes_vector[i]));
00330             tmp->neg.push_back(&(classes_vector[j]));
00331 
00332             // update classifier's number of possitive and negative
00333             // samples
00334             tmp->n_pos = classes_vector[i].Samples();
00335             tmp->n_neg = classes_vector[j].Samples();
00336 
00337             // store classifier
00338             classifiers_vector.push_back(tmp);
00339 
00340             break;
00341             }
00342 
00343           // Fisher Linear Discriminant followed by NCC
00344           case FLDA:
00345             {
00346             Classifier_flda* tmp = new Classifier_flda
00347                                         (
00348                                         classes_vector[i].Data(),
00349                                         classes_vector[j].Data()
00350                                         );
00351 
00352             // update classifier's class objects
00353             tmp->pos.push_back(&(classes_vector[i]));
00354             tmp->neg.push_back(&(classes_vector[j]));
00355 
00356             // update classifier's number of possitive and negative
00357             // samples
00358             tmp->n_pos = classes_vector[i].Samples();
00359             tmp->n_neg = classes_vector[j].Samples();
00360 
00361             // store classifier
00362             classifiers_vector.push_back(tmp);
00363 
00364             break;
00365             }
00366 
00367           // Support Vector Machine Classifier
00368           case SVM:
00369             {
00370             Classifier_svm* tmp = new Classifier_svm
00371                                         (
00372                                         classes_vector[i].Data(),
00373                                         classes_vector[j].Data()
00374                                         );
00375 
00376             // update classifier's class objects
00377             tmp->pos.push_back(&(classes_vector[i]));
00378             tmp->neg.push_back(&(classes_vector[j]));
00379 
00380             // update classifier's number of possitive and negative
00381             // samples
00382             tmp->n_pos = classes_vector[i].Samples();
00383             tmp->n_neg = classes_vector[j].Samples();
00384 
00385             // store classifier
00386             classifiers_vector.push_back(tmp);
00387 
00388             break;
00389             }
00390 
00391           // AdaBoost Classifier
00392           case ADABOOST:
00393             {
00394             Classifier_adaBoost* tmp = new Classifier_adaBoost
00395                                              (
00396                                              classes_vector[i].Data(),
00397                                              classes_vector[j].Data()
00398                                              );
00399 
00400             // update classifier's class objects
00401             tmp->pos.push_back(&(classes_vector[i]));
00402             tmp->neg.push_back(&(classes_vector[j]));
00403 
00404             // update classifier's number of possitive and negative
00405             // samples
00406             tmp->n_pos = classes_vector[i].Samples();
00407             tmp->n_neg = classes_vector[j].Samples();
00408 
00409             // store classifier
00410             classifiers_vector.push_back(tmp);
00411 
00412             break;
00413             }
00414 
00415           // Sum of Error Squares Classifier
00416           case LEAST_SQUARES:
00417             {
00418             Classifier_ls* tmp = new Classifier_ls
00419                                        (
00420                                         classes_vector[i].Data(),
00421                                         classes_vector[j].Data()
00422                                        );
00423 
00424             // update classifier's class objects
00425             tmp->pos.push_back(&(classes_vector[i]));
00426             tmp->neg.push_back(&(classes_vector[j]));
00427 
00428             // update classifier's number of possitive and negative
00429             // samples
00430             tmp->n_pos = classes_vector[i].Samples();
00431             tmp->n_neg = classes_vector[j].Samples();
00432 
00433             // store classifier
00434             classifiers_vector.push_back(tmp);
00435 
00436             break;
00437             }
00438 
00439           // Custom Classifier
00440           case CUSTOM_CLASSIFIER:
00441             {
00442             Classifier_custom* tmp = new Classifier_custom
00443                                            (
00444                                            classes_vector[i].Data(),
00445                                            classes_vector[j].Data()
00446                                            );
00447 
00448             // update classifier's class objects
00449             tmp->pos.push_back(&(classes_vector[i]));
00450             tmp->neg.push_back(&(classes_vector[j]));
00451 
00452             // update classifier's number of possitive and negative
00453             // samples
00454             tmp->n_pos = classes_vector[i].Samples();
00455             tmp->n_neg = classes_vector[j].Samples();
00456 
00457             // store classifier
00458             classifiers_vector.push_back(tmp);
00459 
00460             break;
00461             }
00462 
00463           default:
00464             {
00465             arma_debug_print("one_vs_one_ecocone(): Unknown classifier's option");
00466             }
00467 
00468           }
00469 
00470         // update ECOC matrix in order to create 1 vs 1 configuration
00471         coding_matrix(i, k) =   1;
00472         coding_matrix(j, k) =  -1;
00473 
00474         // increase ECOC matrix column counter
00475         k++;
00476         }
00477 
00478       }
00479 
00480   }
00481 
00482 
00483 
00498 void
00499 decoc_ecocone
00500   (
00501   vector<ClassData>& classes_vector,
00502   const int classifiers_type,
00503   const int criterion_option,
00504   imat& coding_matrix,
00505   vector<Classifier*>& classifiers_vector
00506   )
00507   {
00508   // ================================================================ //
00509   // ||                        Training Step                       || //
00510   // ================================================================ //
00511 
00512     decoc_coding
00513       (
00514       classes_vector,
00515       criterion_option,
00516       classifiers_type,
00517       coding_matrix,
00518       classifiers_vector
00519       );
00520 
00521   }
00522 
00523 
00524 
00540 void
00541 subdecoc_ecocone
00542   (
00543   vector<ClassData>& classes_vector,
00544   const int classifiers_type,
00545   const int criterion_option,
00546   const Threshold& thres,
00547   imat& coding_matrix,
00548   vector<Classifier*>& classifiers_vector
00549   )
00550   {
00551   // number of initial classes
00552   const u32 n_classes = classes_vector.size();
00553 
00554   // vector that keeps track of splitting
00555   vector<vec> class_tracker;
00556 
00557   // allocate coding matrix
00558   coding_matrix = zeros<imat>(n_classes, 2);
00559 
00560   // initialize class_tracker
00561   for(u32 i = 0; i < n_classes; i++)
00562     {
00563     vec temp = zeros<vec>(1);
00564     temp[0] = classes_vector[i].ClassIndex();
00565     class_tracker.push_back(temp);
00566     coding_matrix(i, 0) = i + 1;
00567     coding_matrix(i, 1) = i + 1;
00568     }
00569 
00570   // problem tracker vector -- used to store problem examined so far
00571   // in order to prune the unnecessary recursive steps of the
00572   // direct_subclass_encoding procedure
00573   vector<uvec> problem_tracker;
00574 
00575   // engage recursive subclass creation procedure
00576   direct_subclass_encoding
00577     (
00578     classes_vector,
00579     thres,
00580     criterion_option,
00581     classifiers_type,
00582     coding_matrix,
00583     classifiers_vector,
00584     classes_vector,
00585     class_tracker,
00586     problem_tracker
00587     );
00588 
00589 
00590   // by now the coding matrix is consisted of 2 columns. 1st column
00591   // represents initial class labels and 2nd colum represents resulting
00592   // classes indices (if label and index is the same then the initial
00593   // class didn't split into subclasses, if else the initial class
00594   // has been split into subclasses).
00595 
00596   // the next step is to use all the available information from the
00597   // recursive procedure direct_subclass_encoding in order to create the
00598   // codewords for each of the created classes (note: a class that has
00599   // been split into subclasses will be represented by as many codewords
00600   // in the coding matrix as the the number of its created subclasses)
00601 
00602   // create a temporary matrix to hold the codewords as well as the
00603   // class labels and indices (i.e., the first two rows of temp will be
00604   // the coding matrix so far)
00605   imat temp = zeros<imat>
00606                 (
00607                 coding_matrix.n_rows,
00608                 coding_matrix.n_cols + classifiers_vector.size()
00609                 );
00610 
00611   temp.cols(0, 1) = coding_matrix;
00612 
00613   // number of classifiers created
00614   const u32 n_classifiers = classifiers_vector.size();
00615 
00616   // number of encoding matrix rows
00617   const u32 n_rows = coding_matrix.n_rows;
00618 
00619   // for each classifier
00620   for(u32 i = 0; i < n_classifiers; i++)
00621     {
00622     // find the subclasses that the classifier recognizes and update the
00623     // corresponding column of the coding matrix with +1 or -1 according
00624     // to which set the subclass is contained (minus or plus subclass
00625     // set of classifier)
00626 
00627     // number of classes marked with +1 in current classifier
00628     const u32 n_plus_classes = classifiers_vector[i]->pos.size();
00629 
00630     // for plus subclasses
00631     for(u32 j = 0; j < n_plus_classes; j++)
00632       {
00633 
00634       // for each row of coding matrix
00635       for(u32 k = 0; k < n_rows; k++)
00636         {
00637 
00638         for
00639           (
00640           u32 l = 0;
00641           l < class_tracker[classifiers_vector[i]->pos[j]->ClassIndex() - 1].n_rows;
00642           l++
00643           )
00644           {
00645           // check if the subclass in the corresponding position of
00646           // class_tracker contains to the classifier
00647           if(
00648             class_tracker[classifiers_vector[i]->pos[j]->ClassIndex() - 1](l) ==
00649             coding_matrix(k, 1)
00650             )
00651             {
00652             temp(k, i + 2) = 1;
00653             }
00654 
00655           }
00656 
00657         }
00658 
00659       }
00660 
00661     // number of classes marked with -1 in current classifier
00662     const u32 n_minus_classes = classifiers_vector[i]->neg.size();
00663 
00664     // for minus subclasses
00665     for(u32 j = 0; j < n_minus_classes; j++)
00666       {
00667 
00668       // for each row of coding matrix
00669       for(u32 k = 0; k < n_rows; k++)
00670         {
00671 
00672         for
00673           (
00674           u32 l = 0;
00675           l < class_tracker[classifiers_vector[i]->neg[j]->ClassIndex() - 1].n_rows;
00676           l++
00677           )
00678           {
00679 
00680           // check if the subclass in the corresponding position of
00681           // class_tracker is contained in the classifier
00682           if(
00683             class_tracker[classifiers_vector[i]->neg[j]->ClassIndex() - 1](l) ==
00684             coding_matrix(k, 1)
00685             )
00686             {
00687             temp(k, i + 2) = -1;
00688             }
00689 
00690           }
00691 
00692         }
00693 
00694       }
00695 
00696     }
00697 
00698   // final coding matrix
00699   // each column represents a classifier
00700   // each row represents a codeword
00701   coding_matrix = temp.cols(2, temp.n_cols - 1);
00702   }
00703 
00704 
00705 
00723 void
00724 dense_random_ecocone
00725   (
00726   vector<ClassData>& classes_vector,
00727   const int classifiers_type,
00728   const u32 n_matrices,
00729   const u32 n_desired_classifiers,
00730   imat& coding_matrix,
00731   vector<Classifier*>& classifiers_vector
00732   )
00733   {
00734   // variable to hold the number of classes
00735   const u32 n_classes = classes_vector.size();
00736 
00737   // compute coding matrix for dense random ecoc design
00738   coding_matrix = create_dense_random_matrix
00739                     (
00740                     n_classes,
00741                     n_desired_classifiers,
00742                     n_matrices
00743                     );
00744 
00745   // ================================================================ //
00746   // ||                        Training Step                       || //
00747   // ================================================================ //
00748 
00749     // start training
00750     for(u32 i = 0; i < coding_matrix.n_cols; i++)
00751       {
00752       // data matrix of positive classes for current column of coding
00753       // matrix
00754       mat first_bipartition;
00755 
00756       // data matrix of positive classes for current column of coding
00757       // matrix
00758       mat second_bipartition;
00759 
00760       // temporary vector to store pointers of positive classes
00761       vector<ClassData*> pos_classes;
00762 
00763       // temporary vector to store pointers of negative classes
00764       vector<ClassData*> neg_classes;
00765 
00766       // temporary number of possitive samples
00767       u32 n_pos = 0;
00768 
00769       // temporary number of negative samples
00770       u32 n_neg = 0;
00771 
00772       // iterate through number of classes
00773       for(u32 j = 0; j < n_classes; j++)
00774         {
00775         // if current class is considered positive
00776         if(coding_matrix(j, i) == 1)
00777           {
00778           // append samples of current class to the positive classes data
00779           // matrix
00780           first_bipartition = join_cols
00781                                 (
00782                                 first_bipartition,
00783                                 classes_vector[j].Data()
00784                                 );
00785 
00786           // add pointer of current class to temporary vector of positive
00787           // classes
00788           pos_classes.push_back(&(classes_vector[j]));
00789 
00790           // update number of positive samples
00791           n_pos += classes_vector[j].Samples();
00792           }
00793         else
00794           {
00795           // append samples of current class to the negative classe data
00796           // matrix
00797           second_bipartition = join_cols
00798                                  (
00799                                  second_bipartition,
00800                                  classes_vector[j].Data()
00801                                  );
00802 
00803           // add pointer of current class to temporary vector of
00804           // negative classes
00805           neg_classes.push_back(&(classes_vector[j]));
00806 
00807           // update number of positive samples
00808           n_neg += classes_vector[j].Samples();
00809           }
00810 
00811         }
00812 
00813       // according to user specified classifier
00814       switch(classifiers_type)
00815         {
00816         // Nearest Class Centroid Classifier
00817         case NCC:
00818           {
00819           Classifier_ncc* tmp = new Classifier_ncc
00820                                       (
00821                                       first_bipartition,
00822                                       second_bipartition
00823                                       );
00824 
00825           // update classifier classes
00826           tmp->pos = pos_classes;
00827           tmp->neg = neg_classes;
00828           tmp->n_pos = n_pos;
00829           tmp->n_neg = n_neg;
00830 
00831           // store classifier
00832           classifiers_vector.push_back(tmp);
00833 
00834           break;
00835           }
00836 
00837         // Fisher Linear Discriminant followed by NCC
00838         case FLDA:
00839           {
00840           Classifier_flda* tmp = new Classifier_flda
00841                                        (
00842                                        first_bipartition,
00843                                        second_bipartition
00844                                        );
00845 
00846           // update classifier classes
00847           tmp->pos = pos_classes;
00848           tmp->neg = neg_classes;
00849           tmp->n_pos = n_pos;
00850           tmp->n_neg = n_neg;
00851 
00852           // store classifier
00853           classifiers_vector.push_back(tmp);
00854 
00855           break;
00856           }
00857 
00858         // Support Vector Machine Classifier
00859         case SVM:
00860           {
00861           Classifier_svm* tmp = new Classifier_svm
00862                                       (
00863                                       first_bipartition,
00864                                       second_bipartition
00865                                       );
00866 
00867           // update classifier classes
00868           tmp->pos = pos_classes;
00869           tmp->neg = neg_classes;
00870           tmp->n_pos = n_pos;
00871           tmp->n_neg = n_neg;
00872 
00873           // store classifier
00874           classifiers_vector.push_back(tmp);
00875 
00876           break;
00877           }
00878 
00879         // AdaBoost Classifier
00880         case ADABOOST:
00881           {
00882           Classifier_adaBoost* tmp = new Classifier_adaBoost
00883                                            (
00884                                            first_bipartition,
00885                                            second_bipartition
00886                                            );
00887 
00888           // update classifier classes
00889           tmp->pos = pos_classes;
00890           tmp->neg = neg_classes;
00891           tmp->n_pos = n_pos;
00892           tmp->n_neg = n_neg;
00893 
00894           // store classifier
00895           classifiers_vector.push_back(tmp);
00896 
00897           break;
00898           }
00899 
00900         // Sum of Error Squares Classifier
00901         case LEAST_SQUARES:
00902           {
00903           Classifier_ls* tmp = new Classifier_ls
00904                                      (
00905                                      first_bipartition,
00906                                      second_bipartition
00907                                      );
00908 
00909           // update classifier classes
00910           tmp->pos = pos_classes;
00911           tmp->neg = neg_classes;
00912           tmp->n_pos = n_pos;
00913           tmp->n_neg = n_neg;
00914 
00915           // store classifier
00916           classifiers_vector.push_back(tmp);
00917 
00918           break;
00919           }
00920 
00921         // Custom Classifier
00922         case CUSTOM_CLASSIFIER:
00923           {
00924           Classifier_custom* tmp = new Classifier_custom
00925                                          (
00926                                          first_bipartition,
00927                                          second_bipartition
00928                                          );
00929 
00930           // update classifier classes
00931           tmp->pos = pos_classes;
00932           tmp->neg = neg_classes;
00933           tmp->n_pos = n_pos;
00934           tmp->n_neg = n_neg;
00935 
00936           // store classifier
00937           classifiers_vector.push_back(tmp);
00938 
00939           break;
00940           }
00941 
00942         default:
00943           {
00944           arma_debug_print
00945             (
00946             "dense_random_ecocone(): Unknown classifier's option"
00947             );
00948 
00949           }
00950 
00951         }
00952 
00953       }
00954 
00955   }
00956 
00957 
00958 
00976 void
00977 sparse_random_ecocone
00978   (
00979   vector<ClassData>& classes_vector,
00980   const int classifiers_type,
00981   const u32 n_matrices,
00982   const u32 n_desired_classifiers,
00983   imat& coding_matrix,
00984   vector<Classifier*>& classifiers_vector
00985   )
00986   {
00987   // variable to hold the number of classes
00988   const u32 n_classes = classes_vector.size();
00989 
00990   // compute coding matrix for sparse random ecoc design
00991   coding_matrix = create_sparse_random_matrix
00992                     (
00993                     n_classes,
00994                     n_desired_classifiers,
00995                     n_matrices
00996                     );
00997 
00998   // ================================================================ //
00999   // ||                        Training Step                       || //
01000   // ================================================================ //
01001 
01002     // start training
01003     for(u32 i = 0; i < coding_matrix.n_cols; i++)
01004       {
01005       // data matrix of positive classes for current column of coding
01006       // matrix
01007       mat first_bipartition;
01008 
01009       // data matrix of positive classes for current column of coding
01010       // matrix
01011       mat second_bipartition;
01012 
01013       // temporary vector to store pointers of positive classes
01014       vector<ClassData*> pos_classes;
01015 
01016       // temporary vector to store pointers of negative classes
01017       vector<ClassData*> neg_classes;
01018 
01019       // temporary number of possitive samples
01020       u32 n_pos = 0;
01021 
01022       // temporary number of negative samples
01023       u32 n_neg = 0;
01024 
01025       // iterate through number of classes
01026       for(u32 j = 0; j < n_classes; j++)
01027         {
01028         // if current class is considered positive
01029         if(coding_matrix(j, i) == 1)
01030           {
01031           // append samples of current class to the positive classes
01032           // data matrix
01033           first_bipartition = join_cols
01034                                 (
01035                                 first_bipartition,
01036                                 classes_vector[j].Data()
01037                                 );
01038 
01039           // add pointer of current class to temporary vector of
01040           // positive classes
01041           pos_classes.push_back(&(classes_vector[j]));
01042 
01043           // update number of positive samples
01044           n_pos += classes_vector[j].Samples();
01045           }
01046         else
01047           {
01048           // if current class is considered negative
01049           if(coding_matrix(j, i) == -1)
01050             {
01051             // append samples of current class to the negative classe
01052             // data matrix
01053             second_bipartition = join_cols
01054                                    (
01055                                    second_bipartition,
01056                                    classes_vector[j].Data()
01057                                    );
01058 
01059             // add pointer of current class to temporary vector of
01060             // negative classes
01061             neg_classes.push_back(&(classes_vector[j]));
01062 
01063             // update number of positive samples
01064             n_neg += classes_vector[j].Samples();
01065             }
01066 
01067           // otherwise current class is not considered
01068 
01069           }
01070 
01071         }
01072 
01073       // according to user specified classifier
01074       switch(classifiers_type)
01075         {
01076          // Nearest Class Centroid Classifier
01077         case NCC:
01078           {
01079           Classifier_ncc* tmp = new Classifier_ncc
01080                                       (
01081                                       first_bipartition,
01082                                       second_bipartition
01083                                       );
01084 
01085           // update classifier classes
01086           tmp->pos = pos_classes;
01087           tmp->neg = neg_classes;
01088           tmp->n_pos = n_pos;
01089           tmp->n_neg = n_neg;
01090 
01091           // store classifier
01092           classifiers_vector.push_back(tmp);
01093 
01094           break;
01095           }
01096 
01097         // Fisher Linear Discriminant followed by NCC
01098         case FLDA:
01099           {
01100           Classifier_flda* tmp = new Classifier_flda
01101                                        (
01102                                        first_bipartition,
01103                                        second_bipartition
01104                                        );
01105 
01106           // update classifier classes
01107           tmp->pos = pos_classes;
01108           tmp->neg = neg_classes;
01109           tmp->n_pos = n_pos;
01110           tmp->n_neg = n_neg;
01111 
01112           // store classifier
01113           classifiers_vector.push_back(tmp);
01114 
01115           break;
01116           }
01117 
01118         // Support Vector Machine Classifier
01119         case SVM:
01120           {
01121           Classifier_svm* tmp = new Classifier_svm
01122                                       (
01123                                       first_bipartition,
01124                                       second_bipartition
01125                                       );
01126 
01127           // update classifier classes
01128           tmp->pos = pos_classes;
01129           tmp->neg = neg_classes;
01130           tmp->n_pos = n_pos;
01131           tmp->n_neg = n_neg;
01132 
01133           // store classifier
01134           classifiers_vector.push_back(tmp);
01135 
01136           break;
01137           }
01138 
01139         // AdaBoost Classifier
01140         case ADABOOST:
01141           {
01142           Classifier_adaBoost* tmp = new Classifier_adaBoost
01143                                            (
01144                                            first_bipartition,
01145                                            second_bipartition
01146                                            );
01147 
01148           // update classifier classes
01149           tmp->pos = pos_classes;
01150           tmp->neg = neg_classes;
01151           tmp->n_pos = n_pos;
01152           tmp->n_neg = n_neg;
01153 
01154           // store classifier
01155           classifiers_vector.push_back(tmp);
01156 
01157           break;
01158           }
01159 
01160         // Sum of Error Squares Classifier
01161         case LEAST_SQUARES:
01162           {
01163           Classifier_ls* tmp = new Classifier_ls
01164                                      (
01165                                      first_bipartition,
01166                                      second_bipartition
01167                                      );
01168 
01169           // update classifier classes
01170           tmp->pos = pos_classes;
01171           tmp->neg = neg_classes;
01172           tmp->n_pos = n_pos;
01173           tmp->n_neg = n_neg;
01174 
01175           // store classifier
01176           classifiers_vector.push_back(tmp);
01177 
01178           break;
01179           }
01180 
01181         // Custom Classifier
01182         case CUSTOM_CLASSIFIER:
01183           {
01184           Classifier_custom* tmp = new Classifier_custom
01185                                          (
01186                                          first_bipartition,
01187                                          second_bipartition
01188                                          );
01189 
01190           // update classifier classes
01191           tmp->pos = pos_classes;
01192           tmp->neg = neg_classes;
01193           tmp->n_pos = n_pos;
01194           tmp->n_neg = n_neg;
01195 
01196           // store classifier
01197           classifiers_vector.push_back(tmp);
01198 
01199           break;
01200           }
01201 
01202         default:
01203           {
01204           arma_debug_print
01205             (
01206             "sparse_random_ecocone(): Unknown classifier's option"
01207             );
01208 
01209           }
01210 
01211         }
01212 
01213       }
01214 
01215   }
01216 
01217 
01218 
01254 u32
01255 ecoc_one
01256   (
01257   const mat& training_samples,
01258   const icolvec& training_labels,
01259   const mat& testing_samples,
01260   const icolvec& testing_labels,
01261   const Threshold& thres,
01262   const int decoding_strategy,
01263   const int classifiers_type,
01264   const int criterion_option,
01265   const u32 n_matrices,
01266   const u32 n_desired_classifiers,
01267   const double validation,
01268   const int init_coding_strategy,
01269   const int ecocone_mode,
01270   const u32 max_iter,
01271   const double epsilon,
01272   const double wv,
01273   const bool verbose,
01274   ofstream& verbose_output,
01275   double& elapsed_time
01276   )
01277   {
01278   // timer object to count execution times
01279   wall_clock timer;
01280 
01281   // start timer
01282   timer.tic();
01283 
01284   // number of training samples
01285   const u32 n_training_samples = training_samples.n_rows;
01286 
01287   // number of samples attributes
01288   const u32 n_attributes = training_samples.n_cols;
01289 
01290   // number of testing samples
01291   const u32 n_testing_samples = testing_samples.n_rows;
01292 
01293   // variable to hold the number of classes
01294   u32 n_classes = 0;
01295 
01296   // adjust the training samples class labels to start from one
01297   // and count number of classes
01298   const icolvec tmp_training_labels =
01299     conv_to<icolvec>::from(process_labels(training_labels, n_classes));
01300 
01301   // adjust the testing samples class labels to start from one
01302   const uvec tmp_testing_labels = process_labels(testing_labels);
01303 
01304   // create classes vector
01305   vector<ClassData> classes_vector = create_class_vector
01306                                        (
01307                                        training_samples,
01308                                        tmp_training_labels
01309                                        );
01310 
01311   // classification error
01312   double error = 0.0;
01313 
01314   // predictions for each sample
01315   uvec predictions;
01316 
01317   // confussion matrix
01318   umat confussion;
01319 
01320   // number of misclassified samples
01321   u32 n_missed = 0;
01322 
01323   // coding matrix
01324   imat coding_matrix;
01325 
01326   // classifiers vector
01327   vector<Classifier*> classifiers_vector;
01328 
01329   // ================================================================ //
01330   // ||                        Training Step                       || //
01331   // ================================================================ //
01332 
01333     // ============================================================== //
01334     // || Step No.1: Construct initial coding.                     || //
01335     // ============================================================== //
01336 
01337       // according to user entered initial coding strategy
01338       switch(init_coding_strategy)
01339         {
01340         // Discriminant Error Correcting Output Coding
01341         case DECOC:
01342           {
01343           decoc_ecocone
01344             (
01345             classes_vector,
01346             classifiers_type,
01347             criterion_option,
01348             coding_matrix,
01349             classifiers_vector
01350             );
01351 
01352           break;
01353           }
01354 
01355         // Discriminant Error Correcting Output Coding with subclasses
01356         case SUBDECOC:
01357           {
01358           subdecoc_ecocone
01359             (
01360             classes_vector,
01361             classifiers_type,
01362             criterion_option,
01363             thres,
01364             coding_matrix,
01365             classifiers_vector
01366             );
01367 
01368           break;
01369           }
01370 
01371         // One versus One, or All Pairs Coding
01372         case ONE_VS_ONE:
01373           {
01374           one_vs_one_ecocone
01375             (
01376             classes_vector,
01377             classifiers_type,
01378             coding_matrix,
01379             classifiers_vector
01380             );
01381 
01382           break;
01383           }
01384 
01385         // One versus All, or One Against All Coding
01386         case ONE_VS_ALL:
01387           {
01388 
01389           one_vs_all_ecocone
01390             (
01391             classes_vector,
01392             classifiers_type,
01393             coding_matrix,
01394             classifiers_vector
01395             );
01396 
01397           break;
01398           }
01399 
01400         // Dense Random Coding
01401         case DENSE_RANDOM:
01402           {
01403           dense_random_ecocone
01404             (
01405             classes_vector,
01406             classifiers_type,
01407             n_matrices,
01408             n_desired_classifiers,
01409             coding_matrix,
01410             classifiers_vector
01411             );
01412 
01413           break;
01414           }
01415 
01416         // Sparse Random Coding
01417         case SPARSE_RANDOM:
01418           {
01419           sparse_random_ecocone
01420             (
01421             classes_vector,
01422             classifiers_type,
01423             n_matrices,
01424             n_desired_classifiers,
01425             coding_matrix,
01426             classifiers_vector
01427             );
01428 
01429           break;
01430           }
01431 
01432         // User Custom Coding
01433         case CUSTOM_CODING:
01434           {
01435 
01436           break;
01437           }
01438 
01439         }
01440 
01441 
01442     // ============================================================== //
01443     // || Step No.2: Construct inner training and validation sets. || //
01444     // ============================================================== //
01445 
01446       // 2D matrix of inner training set
01447       mat St;
01448 
01449       // vector of class labels of inner training set
01450       uvec lt;
01451 
01452       // 2D matrix of the inner validation set
01453       mat Sv;
01454 
01455       // vector of class labels of inner validation set
01456       uvec lv;
01457 
01458       // iterate through training class objects data matrices
01459       for(u32 i = 0; i < n_classes; i++)
01460         {
01461         // temporary shuffled data matrix of current class
01462         // shuffle in order to avoid biased estimations
01463         mat tmp = shuffle(classes_vector[i].Data());
01464 
01465         // number of samples for inner training set
01466         const u32 n_train =
01467           floor(classes_vector[i].Samples() * (1.0 - validation));
01468 
01469         // append selected number of samples of current class to
01470         // training and validation set respectively
01471         St = join_cols(St, tmp.rows(0, n_train - 1));
01472         Sv = join_cols(Sv, tmp.rows(n_train, tmp.n_rows - 1));
01473 
01474         // update respective label vectors
01475         lt = join_cols(lt, (i + 1) * ones<uvec>(n_train));
01476         lv = join_cols(lv, (i + 1) * ones<uvec>(tmp.n_rows - n_train));
01477         }
01478 
01479     // ============================================================== //
01480     // || Step No.3: Test accuracy on the training and validation  || //
01481     // ||            sets St and Sv (initialization of error for   || //
01482     // ||            later while loop).                            || //
01483     // ============================================================== //
01484 
01485       // accuracy of current coding scheme on inner training set
01486       double at = 0.0;
01487 
01488       // accuracy of current coding scheme on inner validation set
01489       double av = 0.0;
01490 
01491       // confusion matrix for inner training set
01492       umat confussion_t;
01493 
01494       // confusion matrix for inner validation set
01495       umat confussion_v;
01496 
01497       // compute accuracy for inner training set
01498       decode
01499         (
01500         St,
01501         lt,
01502         coding_matrix,
01503         classifiers_vector,
01504         classes_vector,
01505         decoding_strategy,
01506         predictions,
01507         n_missed,
01508         at,
01509         confussion_t
01510         );
01511 
01512       // compute accuracy for inner validation set
01513       decode
01514         (
01515         Sv,
01516         lv,
01517         coding_matrix,
01518         classifiers_vector,
01519         classes_vector,
01520         decoding_strategy,
01521         predictions,
01522         n_missed,
01523         av,
01524         confussion_v
01525         );
01526 
01527       // weight balancing validation vs training accuracy in the
01527       // combined error (NOTE(review): the wv parameter appears
01527       // intended here, but a hard-coded 0.5 is used -- confirm)
01528 
01529       double vw = 0.5;
01530 
01531       // initialize current error
01532       double error_cur = (vw * av) + ((1.0 - vw) * at);
01533 
01534       // initialize previous error
01535       double error_prev = 1.0;
01536 
01537       // number of iterations
01538       u32 iter = 0;
01539 
01540       bool exit_loop = false;
01541 
01542       // loop while conditions are met
01543       while
01544         (
01545         error_cur > epsilon &&
01546         error_cur < error_prev &&
01547         iter < max_iter &&
01548         exit_loop == false
01549         )
01550         {
01551         // select the classes with the highest error and find partition
01552         confussion = symmatu(confussion_v) + symmatu(confussion_t);
01553 
01554         // make diagonal elements equal to zero
01555         confussion.diag() = zeros<uvec>(confussion.n_rows);
01556 
01557         // maximum row index
01558         u32 max_row = 0;
01559 
01560         // maximum column index
01561         u32 max_col = 0;
01562 
01563         // find maximum element of confusion matrix
01564         confussion.max(max_row, max_col);
01565 
01566         // put classes in the pool according to ECOCONE mode
01567         switch(ecocone_mode)
01568           {
01569           // consider only a pair of classes in the adding column
01570           case PAIR:
01571             {
01572             // if confussion matrix isn't a zero matrix
01573             if(max_row != 0 && max_col != 0)
01574               {
01575               ivec newcol = zeros<ivec>(coding_matrix.n_rows);
01576 
01577               // add positive class
01578               newcol[max_row] = 1;
01579 
01580               // add negative class
01581               newcol[max_col] = -1;
01582 
01583               // check wether a same column already exists
01584               for(u32 i = 0; i < coding_matrix.n_cols; i++)
01585                 {
01586                 if
01587                   (
01588                   coding_matrix.n_rows ==
01589                   accu(newcol == coding_matrix.col(i)) ||
01590                   coding_matrix.n_rows ==
01591                   accu(-newcol == coding_matrix.col(i))
01592                   )
01593                   {
01594                   iter = max_iter;
01595                   break;
01596                   }
01597 
01598                 }
01599 
01600               // if newly created problem isn't already in the coding
01601               // matrix
01602               if(iter < max_iter)
01603                 {
01604                 // update ECOC coding matrix
01605                 coding_matrix = join_rows(coding_matrix, newcol);
01606 
01607                 // construct and push classifier to vector of
01608                 // classifiers
01609                 Classifier* tmp_classifier =
01610                   construct_classifier
01611                     (
01612                     classes_vector[max_row].Data(),
01613                     classes_vector[max_col].Data(),
01614                     classifiers_type
01615                     );
01616 
01617                 // update classifier's class objects
01618                 tmp_classifier->pos.push_back
01619                                       (
01620                                       &(classes_vector[max_row])
01621                                       );
01622 
01623                 tmp_classifier->neg.push_back
01624                                       (
01625                                       &(classes_vector[max_col])
01626                                       );
01627 
01628                 // update classifier's number of positive and negative
01629                 // samples
01630                 tmp_classifier->n_pos =
01631                   classes_vector[max_row].Samples();
01632 
01633                 tmp_classifier->n_neg =
01634                   classes_vector[max_col].Samples();
01635 
01636                 classifiers_vector.push_back(tmp_classifier);
01637                 }
01638 
01639               // compute accuracy for inner training set
01640               decode
01641                 (
01642                 St,
01643                 lt,
01644                 coding_matrix,
01645                 classifiers_vector,
01646                 classes_vector,
01647                 decoding_strategy,
01648                 predictions,
01649                 n_missed,
01650                 at,
01651                 confussion_t
01652                 );
01653 
01654               // compute accuracy for inner validation set
01655               decode
01656                 (
01657                 Sv,
01658                 lv,
01659                 coding_matrix,
01660                 classifiers_vector,
01661                 classes_vector,
01662                 decoding_strategy,
01663                 predictions,
01664                 n_missed,
01665                 av,
01666                 confussion_v
01667                 );
01668 
01669               double tmp_error = (vw * av) + ((1.0 - vw) * at);
01670 
01671               // if newly created classifier improves the total error
01672               if(tmp_error <= error_cur)
01673                 {
01674                 error_prev = error_cur;
01675                 error_cur = tmp_error;
01676                 }
01677               else
01678                 {
01679                 // delete classifier
01680                 Classifier* tmp_classifier =
01681                   classifiers_vector[classifiers_vector.size() - 1];
01682 
01683                 // extract classifier from classifiers vector
01684                 classifiers_vector.pop_back();
01685 
01686                 delete tmp_classifier;
01687 
01688                 // remove newly added column
01689                 coding_matrix =
01690                   coding_matrix.cols(0, coding_matrix.n_cols - 2);
01691 
01692                 // exit loop flag is true
01693                 exit_loop = true;
01694                 }
01695 
01696               }
01697 
01698             break;
01699             }
01700 
01701           // use SFFS to determine the new column of the ECOC matrix
01702           case ALL_CLASSES:
01703             {
01704             // temporary coding columns matrix
01705             imat tmpcodmat;
01706 
01707             // create vector of the rest classes
01708             vector<ClassData> rest_classes;
01709 
01710             // vector of temporary classifiers
01711             vector<Classifier*> cvec;
01712 
01713             // create vector of the rest classes
01714             vector<ClassData> curclasses;
01715 
01716             // push back classes with highest error
01717             curclasses.push_back(classes_vector[max_row]);
01718             curclasses.push_back(classes_vector[max_col]);
01719 
01720             // iterate through classes
01721             for(u32 i = 0; i < n_classes; i++)
01722               {
01723               if(i != max_row && i != max_col)
01724                 {
01725                 rest_classes.push_back(classes_vector[i]);
01726                 }
01727 
01728               }
01729 
01730             // iterate through remaining classes
01731             for(u32 i = 0; i < rest_classes.size(); i++)
01732               {
01733               curclasses.push_back(rest_classes[i]);
01734 
01735               uvec bs = sffs(curclasses, criterion_option);
01736 
01737               // compute complements set's labels
01738               uvec bsc = complement(bs, curclasses.size());
01739 
01740               ivec tmpcol = zeros<ivec>(n_classes);
01741 
01742               // first bipartion matrix
01743               mat A;
01744 
01745               // add positive classes
01746               for(u32 j = 0; j < bs.n_elem; j++)
01747                 {
01748                 tmpcol[curclasses[bs[j]].ClassIndex() - 1] = 1;
01749                 A = join_cols(A, curclasses[bs[j]].Data());
01750                 }
01751 
01752               // second bipartion matrix
01753               mat B;
01754 
01755               // add negative classes
01756               for(u32 j = 0; j < bsc.n_elem; j++)
01757                 {
01758                 tmpcol[curclasses[bsc[j]].ClassIndex() - 1] = -1;
01759                 B = join_cols(B, curclasses[bsc[j]].Data());
01760                 }
01761 
01762               bool found = false;
01763 
01764               // check wether a same column already exists
01765               for(u32 j = 0; j < coding_matrix.n_cols; j++)
01766                 {
01767                 if
01768                   (
01769                   coding_matrix.n_rows ==
01770                   accu(tmpcol == coding_matrix.col(j)) ||
01771                   coding_matrix.n_rows ==
01772                   accu(-tmpcol == coding_matrix.col(j))
01773                   )
01774                   {
01775                   found = true;
01776                   break;
01777                   }
01778 
01779                 }
01780 
01781               if(found == false)
01782                 {
01783                 // construct and push classifier to vector of classifiers
01784                 Classifier* tmp_classifier =
01785                   construct_classifier(A, B, classifiers_type);
01786 
01787                 // update classifier's class objects
01788                 for(u32 j = 0; j < bs.n_elem; j++)
01789                   {
01790                   tmp_classifier->pos.push_back
01791                                         (
01792                                         &(classes_vector[curclasses[bs[j]].ClassIndex()])
01793                                         );
01794 
01795                   tmp_classifier->n_pos += curclasses[bs[j]].Samples();
01796                   }
01797 
01798                 // update classifier's class objects
01799                 for(u32 j = 0; j < bsc.n_elem; j++)
01800                   {
01801                   tmp_classifier->pos.push_back
01802                                         (
01803                                         &(classes_vector[curclasses[bsc[j]].ClassIndex()])
01804                                         );
01805 
01806                   tmp_classifier->n_neg += curclasses[bsc[j]].Samples();
01807                   }
01808 
01809                 tmpcodmat = join_rows(tmpcodmat, tmpcol);
01810 
01811                 // push back classifier
01812                 cvec.push_back(tmp_classifier);
01813                 }
01814 
01815               }
01816 
01817             // if confussion matrix isn't diagonal
01818             if(max_row != 0 && max_col != 0)
01819               {
01820               ivec newcol = zeros<ivec>(coding_matrix.n_rows);
01821 
01822               // add positive class
01823               newcol[max_row] = 1;
01824 
01825               // add negative class
01826               newcol[max_col] = -1;
01827 
01828               bool found = false;
01829 
01830               // check wether a same column already exists
01831               for(u32 i = 0; i < coding_matrix.n_cols; i++)
01832                 {
01833                 if
01834                   (
01835                   coding_matrix.n_rows ==
01836                   accu(newcol == coding_matrix.col(i)) ||
01837                   coding_matrix.n_rows ==
01838                   accu(-newcol == coding_matrix.col(i))
01839                   )
01840                   {
01841                   found = true;
01842                   break;
01843                   }
01844 
01845                 }
01846 
01847               // if newly created problem isn't already in the coding
01848               // matrix
01849               if(found == false)
01850                 {
01851                 // update ECOC coding matrix
01852                 tmpcodmat = join_rows(tmpcodmat, newcol);
01853 
01854                 // construct and push classifier to vector of
01855                 // classifiers
01856                 Classifier* tmp_classifier =
01857                   construct_classifier
01858                     (
01859                     classes_vector[max_row].Data(),
01860                     classes_vector[max_col].Data(),
01861                     classifiers_type
01862                     );
01863 
01864                 // update classifier's class objects
01865                 tmp_classifier->pos.push_back
01866                                       (
01867                                       &(classes_vector[max_row])
01868                                       );
01869 
01870                 tmp_classifier->neg.push_back
01871                                       (
01872                                       &(classes_vector[max_col])
01873                                       );
01874 
01875                 // update classifier's number of positive and negative
01876                 // samples
01877                 tmp_classifier->n_pos =
01878                   classes_vector[max_row].Samples();
01879 
01880                 tmp_classifier->n_neg =
01881                   classes_vector[max_col].Samples();
01882 
01883                 cvec.push_back(tmp_classifier);
01884                 }
01885 
01886               }
01887 
01888             // best index
01889             u32 indx = 0;
01890 
01891             // best error
01892             double bserror = 100.0;
01893 
01894             // iterate through number of created classifiers
01895             for(u32 i = 0; i < cvec.size(); i++)
01896               {
01897               // temporarily push classifier to classifiers vector
01898               classifiers_vector.push_back(cvec[i]);
01899 
01900               // temporary confusion matrices
01901               umat tmpconfussion_t;
01902               umat tmpconfussion_v;
01903 
01904               // temporary coding matrix
01905               imat tmpcm = join_rows(coding_matrix, tmpcodmat.col(i));
01906 
01907               // compute accuracy for inner training set
01908               decode
01909                 (
01910                 St,
01911                 lt,
01912                 tmpcm,
01913                 classifiers_vector,
01914                 classes_vector,
01915                 decoding_strategy,
01916                 predictions,
01917                 n_missed,
01918                 at,
01919                 tmpconfussion_t
01920                 );
01921 
01922               // compute accuracy for inner validation set
01923               decode
01924                 (
01925                 Sv,
01926                 lv,
01927                 tmpcm,
01928                 classifiers_vector,
01929                 classes_vector,
01930                 decoding_strategy,
01931                 predictions,
01932                 n_missed,
01933                 av,
01934                 tmpconfussion_v
01935                 );
01936 
01937               double tmp_error = (vw * av) + ((1.0 - vw) * at);
01938 
01939               if(tmp_error <= bserror)
01940                 {
01941                 indx = i;
01942                 bserror = tmp_error;
01943                 confussion_v = tmpconfussion_v;
01944                 confussion_t = tmpconfussion_t;
01945                 }
01946 
01947               // remove classifier
01948               classifiers_vector.pop_back();
01949               }
01950 
01951             // keep the best newly created column and classifier;
01951             // discard the rest
01952             for(u32 i = 0; i < cvec.size(); i++)
01953               {
01954               if(i == indx)
01955                 {
01956                 if(bserror <= error_cur)
01957                   {
01958                   error_prev = error_cur;
01959                   error_cur = bserror;
01960                   classifiers_vector.push_back(cvec[i]);
01961                   coding_matrix = join_rows
01962                                     (
01963                                     coding_matrix,
01964                                     tmpcodmat.col(i)
01965                                     );
01966                   }
01967                 else
01968                   {
01969                   exit_loop = true;
01970                   }
01971 
01972                 }
01973               else
01974                 {
01975                 delete cvec[i];
01976                 }
01977 
01978               }
01979 
01980             break;
01981             }
01982 
01983           // exit with error
01984           default:
01985             {
01986             arma_debug_print
01987               (
01988               "ecoc_one(): Unknown ECOC One mode"
01989               );
01990             }
01991 
01992           }
01993 
01994         iter++;
01995         }
01996 
01997   // ================================================================ //
01998   // ||                        Testing Step                        || //
01999   // ================================================================ //
02000 
02001     // used to hold the number of missclassified testing samples
02002     decode
02003       (
02004       testing_samples,
02005       tmp_testing_labels,
02006       coding_matrix,
02007       classifiers_vector,
02008       classes_vector,
02009       decoding_strategy,
02010       predictions,
02011       n_missed,
02012       error,
02013       confussion
02014       );
02015 
02016   // if verbose output is activated
02017   if(verbose == true)
02018     {
02019     predictions = join_rows(predictions, tmp_testing_labels);
02020     verbose_output << "* Predictions vs Labels: " << endl << predictions << endl << endl;
02021     verbose_output << "* Coding Matrix: " << endl << coding_matrix << endl << endl;
02022     verbose_output << "* Confusion Matrix: " << endl << confussion << endl;
02023     }
02024 
02025   // clean up classifiers vector
02026   for(u32 i = 0; i < classifiers_vector.size(); i++)
02027     {
02028     delete classifiers_vector[i];
02029     }
02030 
02031   // stop timer
02032   elapsed_time = timer.toc();
02033 
02034   // reset class counter
02035   ClassData::globalIndex = 0;
02036 
02037   // return number of misclassified samples
02038   return n_missed;
02039   }
02040 
02041 
02042 
 All Data Structures Namespaces Files Functions Variables Typedefs Enumerator Defines