"""Dataset container"""

__docformat__ = 'restructuredtext'

import operator
import random
import mvpa.support.copy as copy
import numpy as N

from sets import Set

from mvpa.misc.exceptions import DatasetError
from mvpa.misc.support import idhash as idhash_
from mvpa.base.dochelpers import enhancedDocString, table2string

from mvpa.base import warning

if __debug__:
    from mvpa.base import debug


def _validate_indexes_uniq_sorted(seq, fname, item):
    """Helper function to validate that seq contains unique sorted values
    """
    if operator.isSequenceType(seq):
        seq_unique = N.unique(seq)
        if len(seq) != len(seq_unique):
            warning("%s() operates only with indexes for %s without"
                    " repetitions. Repetitions were removed."
                    % (fname, item))
        if N.any(N.sort(seq) != seq_unique):
            warning("%s() does not guarantee the original order"
                    " of selected %ss. Use selectSamples() and "
                    " selectFeatures(sort=False) instead" % (fname, item))


class Dataset(object):
    """*The* Dataset.

    This class provides a container to store all necessary data to
    perform MVPA analyses. These are the data samples, as well as the
    labels associated with the samples. Additionally, samples can be
    grouped into chunks.

    :Groups:
      - `Creators`: `__init__`, `selectFeatures`, `selectSamples`,
        `applyMapper`
      - `Mutators`: `permuteLabels`

    Important: labels are assumed to be immutable, i.e. no one should modify
    them externally by accessing indexed items, i.e. something like
    ``dataset.labels[1] += 100`` must not be used. If a label has to be
    modified, a full copy of the labels should be obtained, operated on,
    and assigned back to the dataset; otherwise dataset.uniquelabels
    would not work. The same applies to any other attribute which has a
    corresponding unique* access property.

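    For example, the safe way to modify labels is to operate on a copy
    (an illustrative sketch)::

      labels = dataset.labels.copy()
      labels[1] += 100
      dataset.labels = labels
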
    """

    # class-level registries of attribute definitions, populated by
    # _registerAttribute below
    _uniqueattributes = []
    """Unique attributes associated with the data"""

    _registeredattributes = []
    """Registered attributes (stored in _data)"""

    _requiredattributes = ['samples', 'labels']
    """Attributes which have to be provided to __init__, or otherwise
    no default values would be assumed and construction of the
    instance would fail"""

    def __init__(self,
                 # copy constructor arguments
                 data=None,
                 dsattr=None,
                 # data type of the samples
                 dtype=None,
                 # sample attributes
                 samples=None,
                 labels=None,
                 labels_map=None,
                 chunks=None,
                 origids=None,
                 # flags
                 check_data=True,
                 copy_samples=False,
                 copy_data=True,
                 copy_dsattr=True):
        """Initialize dataset instance

        There are basically two different ways to create a dataset:

        1. Create a new dataset from samples and sample attributes.  In
           this mode a two-dimensional `ndarray` has to be passed to the
           `samples` keyword argument and the corresponding samples
           attributes are provided via the `labels` and `chunks`
           arguments.

        2. Copy constructor mode
           The second way is used internally to perform quick copying
           of datasets, e.g. when performing feature selection. In this
           mode the two dictionaries (`data` and `dsattr`) are
           required. For performance reasons this mode bypasses most of
           the sanity checks performed by the previous mode, as for
           internal operations data integrity is assumed.


        :Parameters:
          data : dict
            Dictionary with an arbitrary number of entries. The value for
            each key in the dict has to be an ndarray with the
            same length as the number of rows in the samples array.
            A special entry in this dictionary is 'samples', a 2d array
            (samples x features). A shallow copy is stored in the object.
          dsattr : dict
            Dictionary of dataset attributes. An arbitrary number of
            arbitrarily named and typed objects can be stored here. A
            shallow copy of the dictionary is stored in the object.
          dtype : type | None
            If None -- do not change data type if samples
            is an ndarray. Otherwise convert samples to dtype.


        :Keywords:
          samples : ndarray
            2d array (samples x features)
          labels
            An array or scalar value defining labels for each sample.
            Generally `labels` should be numeric, unless `labels_map`
            is used
          labels_map : None or bool or dict
            Map original labels into numeric labels. If True, the
            mapping is computed if labels are literal. If False,
            no mapping is computed. If a dict instance -- the provided
            mapping is verified and applied. If you want to have
            labels_map just be present given already numeric labels,
            just assign a labels_map dictionary to an existing dataset
            instance
          chunks
            An array or scalar value defining chunks for each sample

        Each of the keyword arguments overwrites what is/might be
        already in the `data` container.

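        As an illustrative sketch (array values are arbitrary), a dataset
        could be constructed as::

          ds = Dataset(samples=N.random.standard_normal((4, 3)),
                       labels=[1, 1, 2, 2],
                       chunks=[0, 0, 1, 1])
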
        """
        # initialize containers; default to empty dicts
        if data is None:
            data = {}
        if dsattr is None:
            dsattr = {}

        if copy_data:
            # deep copy the data dict: shallow copy first to get a
            # container of our own, then copy each value individually
            lcl_data = data.copy()
            for k, v in data.iteritems():
                # skip copying samples if not requested
                if k == 'samples' and not copy_samples:
                    continue
                lcl_data[k] = v.copy()
        else:
            # shallow copy -- values are shared with the source dict
            lcl_data = data.copy()

        if copy_dsattr and len(dsattr)>0:
            # deep copy if dsattr is not empty
            if __debug__:
                debug('DS', "Deep copying dsattr %s" % `dsattr`)
            lcl_dsattr = copy.deepcopy(dsattr)
        else:
            # shallow copy
            lcl_dsattr = copy.copy(dsattr)

        # bind the containers to the instance
        self._data = lcl_data
        """What makes a dataset."""

        self._dsattr = lcl_dsattr
        """Dataset attributes."""

        # given samples override any 'samples' entry stored in `data`
        if not samples is None:
            if __debug__:
                if lcl_data.has_key('samples'):
                    debug('DS',
                          "`Data` dict has `samples` (%s) but there is also" \
                          " __init__ parameter `samples` which overrides " \
                          " stored in `data`" % (`lcl_data['samples'].shape`))
            lcl_data['samples'] = self._shapeSamples(samples, dtype,
                                                     copy_samples)

        # given labels override any 'labels' entry stored in `data`
        if not labels is None:
            if __debug__:
                if lcl_data.has_key('labels'):
                    debug('DS',
                          "`Data` dict has `labels` (%s) but there is also" \
                          " __init__ parameter `labels` which overrides " \
                          " stored in `data`" % (`lcl_data['labels']`))
            if lcl_data.has_key('samples'):
                lcl_data['labels'] = \
                    self._expandSampleAttribute(labels, 'labels')

        # check if all required attributes are provided
        for attr in self._requiredattributes:
            if not lcl_data.has_key(attr):
                raise DatasetError, \
                      "Attribute %s is required to initialize dataset" % \
                      attr

        nsamples = self.nsamples

        # assign chunks, or default to a unique chunk per each sample
        if not chunks is None:
            lcl_data['chunks'] = \
                self._expandSampleAttribute(chunks, 'chunks')
        elif not lcl_data.has_key('chunks'):
            # if no chunk information is given, assume that every
            # sample is its own chunk
            lcl_data['chunks'] = N.arange(nsamples)

        # always provide origids
        if not origids is None:
            # simply assign the given ones
            lcl_data['origids'] = origids
        elif not lcl_data.has_key('origids'):
            # otherwise construct default ones
            lcl_data['origids'] = N.arange(len(lcl_data['labels']))
        else:
            # assume origids were specified already (e.g. in copy
            # constructor mode) and leave them as they are, so they
            # survive operations like selectSamples
            pass

        # initialize any remaining registered attribute with zeros
        for attr in self._registeredattributes:
            if not lcl_data.has_key(attr):
                if __debug__:
                    debug("DS", "Initializing attribute %s" % attr)
                lcl_data[attr] = N.zeros(nsamples)

        # labels_map handling: optionally map literal labels to numeric ones
        labels_ = N.asarray(lcl_data['labels'])
        labels_map_known = lcl_dsattr.has_key('labels_map')
        if labels_map is True:
            # need to compute labels_map only if labels are literal
            if labels_.dtype.char == 'S':
                # create the mapping: sorted unique literal labels get
                # enumerated
                ulabels = list(Set(labels_))
                ulabels.sort()
                labels_map = dict([ (x[1], x[0]) for x in enumerate(ulabels) ])
                if __debug__:
                    debug('DS', 'Mapping for the labels computed to be %s'
                          % labels_map)
            else:
                if __debug__:
                    debug('DS', 'Mapping of labels was requested but labels '
                          'are not strings. Skipped')
                labels_map = None
        elif labels_map is False:
            labels_map = None

        if isinstance(labels_map, dict):
            if labels_map_known:
                if __debug__:
                    debug('DS',
                          "`dsattr` dict has `labels_map` (%s) but there is also" \
                          " __init__ parameter `labels_map` (%s) which overrides " \
                          " stored in `dsattr`" % (lcl_dsattr['labels_map'], labels_map))

            lcl_dsattr['labels_map'] = labels_map
            # map labels if they are literal or the mapping was not known
            # (i.e. not stored) before
            if labels_.dtype.char == 'S' or not labels_map_known:
                if __debug__:
                    debug('DS_', "Remapping labels using mapping %s" % labels_map)
                # the mapping must cover all present labels
                try:
                    lcl_data['labels'] = N.array(
                        [labels_map[x] for x in lcl_data['labels']])
                except KeyError, e:
                    raise ValueError, "Provided labels_map %s is insufficient " \
                          "to map all the labels. Mapping for label %s is " \
                          "missing" % (labels_map, e)

        elif not lcl_dsattr.has_key('labels_map'):
            lcl_dsattr['labels_map'] = labels_map
        elif __debug__:
            debug('DS_', 'Not overriding labels_map in dsattr since it has one')

        if check_data:
            self._checkData()

        # if labels or chunks were provided, the cached unique* values
        # are no longer valid and must be recomputed
        if not labels is None or not chunks is None:
            lcl_dsattr['__uniquereseted'] = False
            self._resetallunique(force=True)


    __doc__ = enhancedDocString('Dataset', locals())


    @property
    def idhash(self):
        """To verify if dataset is in the same state as when smth else was done

        Like if classifier was trained on the same dataset as in question"""
        _data = self._data
        res = idhash_(_data)

        # we cannot count on the order in which the values would be
        # returned from the dict, so hash per-key in sorted order
        keys = _data.keys()
        keys.sort()
        for k in keys:
            res += idhash_(_data[k])
        return res


    def _resetallunique(self, force=False):
        """Set to None all unique* attributes of corresponding dictionary
        """
        _dsattr = self._dsattr

        if not force and _dsattr['__uniquereseted']:
            return

        _uniqueattributes = self._uniqueattributes

        if __debug__ and "DS_" in debug.active:
            debug("DS_", "Resetting all attributes %s for dataset %s"
                  % (_uniqueattributes,
                     self.summary(uniq=False, idhash=False,
                                  stats=False, lstats=False)))

        # invalidate all unique* members so they get recomputed lazily
        for k in _uniqueattributes:
            _dsattr[k] = None
        _dsattr['__uniquereseted'] = True


    def _getuniqueattr(self, attrib, dict_):
        """Provide common facility to return unique attributes

        XXX `dict_` can be simply replaced now with self._dsattr
        """
        # local binding
        _dsattr = self._dsattr

        if not _dsattr.has_key(attrib) or _dsattr[attrib] is None:
            if __debug__ and 'DS_' in debug.active:
                debug("DS_", "Recomputing unique set for attrib %s within %s" %
                      (attrib, self.summary(uniq=False,
                                            stats=False, lstats=False)))
            # recompute: strip the 'unique' prefix to get the data key
            _dsattr[attrib] = N.unique( N.asanyarray(dict_[attrib[6:]]) )
            assert(not _dsattr[attrib] is None)
            _dsattr['__uniquereseted'] = False

        return _dsattr[attrib]


    def _setdataattr(self, attrib, value):
        """Provide common facility to set attributes
        """
        if len(value) != self.nsamples:
            raise ValueError, \
                  "Provided %s has %d entries while there are %d samples" % \
                  (attrib, len(value), self.nsamples)
        self._data[attrib] = N.asarray(value)
        uniqueattr = "unique" + attrib

        # invalidate the cached unique values for this attribute
        _dsattr = self._dsattr
        if _dsattr.has_key(uniqueattr):
            _dsattr[uniqueattr] = None


    def _getNSamplesPerAttr(self, attrib='labels'):
        """Returns the number of samples per unique label.
        """
        # local binding
        _data = self._data

        # make sure the unique values are computed
        uniqueattr = self._getuniqueattr(attrib="unique" + attrib,
                                         dict_=_data)

        # use a dictionary to cope with arbitrary attribute values
        result = dict(zip(uniqueattr, [ 0 ] * len(uniqueattr)))
        for l in _data[attrib]:
            result[l] += 1

        return result


    def _getSampleIdsByAttr(self, values, attrib="labels", sort=True):
        """Return indices of samples given a list of attributes
        """
        # treat scalars and plain strings as a single-item sequence
        if not operator.isSequenceType(values) \
               or isinstance(values, basestring):
            values = [ values ]

        # collect the ids matching any of the given values
        sel = N.array([], dtype=N.int16)
        _data = self._data
        for value in values:
            sel = N.concatenate((
                sel, N.where(_data[attrib]==value)[0]))

        if sort:
            # place samples in the right order
            sel.sort()

        return sel


    def idsonboundaries(self, prior=0, post=0,
                        attributes_to_track=['labels', 'chunks'],
                        affected_labels=None,
                        revert=False):
        """Find samples which are on the boundaries of the blocks

        Such samples might need to be removed. By default (with
        prior=0, post=0) ids of the first samples in a 'block' are
        reported

        :Parameters:
          prior : int
            how many samples prior to the transition sample to include
          post : int
            how many samples post the transition sample to include
          attributes_to_track : list of basestring
            which attributes to track to decide on the boundary condition
          affected_labels : list of basestring
            for which labels to perform the selection. If None -- for all
          revert : bool
            whether to revert the meaning and provide ids of samples which
            are found to not be boundary samples
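
        A usage sketch for a block design (parameter values are
        illustrative)::

          # ids of each block-onset sample plus the sample right after it
          boundary_ids = dataset.idsonboundaries(prior=0, post=1)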
        """
        # local bindings
        _data = self._data
        labels = self.labels
        nsamples = self.nsamples

        lastseen = none = [None for attr in attributes_to_track]
        transitions = []

        # pass through all samples (+1 to catch the end of the last block)
        for i in xrange(nsamples+1):
            if i < nsamples:
                current = [_data[attr][i] for attr in attributes_to_track]
            else:
                current = none
            if lastseen != current:
                # transition point -- collect the surrounding ids
                new_transitions = range(max(0, i-prior),
                                        min(nsamples-1, i+post)+1)
                if affected_labels is not None:
                    new_transitions = [i for i in new_transitions
                                       if labels[i] in affected_labels]
                transitions += new_transitions
                lastseen = current

        transitions = Set(transitions)
        if revert:
            transitions = Set(range(nsamples)).difference(transitions)

        # return as a sorted list of unique ids
        transitions = N.array(list(transitions))
        transitions.sort()
        return list(transitions)


    def _shapeSamples(self, samples, dtype, copy):
        """Adapt different kinds of samples

        Handle all possible input value for 'samples' and transform
        them into a 2d (samples x feature) representation.
        """
        # put samples array into correct shape:
        # 1d arrays or simple sequences are assumed to be a single pattern
        if (not isinstance(samples, N.ndarray)):
            # it is safe to provide dtype which defaults to None,
            # when N would choose an appropriate dtype automagically
            samples = N.array(samples, ndmin=2, dtype=dtype, copy=copy)
        else:
            if samples.ndim < 2 \
                   or (not dtype is None and dtype != samples.dtype):
                if dtype is None:
                    dtype = samples.dtype
                samples = N.array(samples, ndmin=2, dtype=dtype,
                                  copy=copy)
            elif copy:
                samples = samples.copy()

        # only samples x features matrices are supported
        if len(samples.shape) > 2:
            raise DatasetError, "Only (samples x features) -> 2d sample " \
                  + "are supported (got %s shape of samples)." \
                  % (`samples.shape`) \
                  + " Consider MappedDataset if applicable."

        return samples


    def _checkData(self):
        """Checks `_data` members to have the same # of samples.
        """
        # all registered data members must have the same number of rows
        # as the samples array
        nsamples = self.nsamples
        _data = self._data

        for k, v in _data.iteritems():
            if not len(v) == nsamples:
                raise DatasetError, \
                      "Length of sample attribute '%s' [%i] does not " \
                      "match the number of samples in the dataset [%i]." \
                      % (k, len(v), nsamples)

        # check for unique origids -- they identify samples, so
        # duplicates are not allowed
        uniques = N.unique(_data['origids'])
        uniques.sort()
        # need to copy to prevent sorting the original array
        sorted_ids = _data['origids'].copy()
        sorted_ids.sort()

        if not (uniques == sorted_ids).all():
            raise DatasetError, "Samples IDs are not unique."

        # warn about literal labels, since most functionality assumes
        # numeric ones
        if _data['labels'].dtype.char == 'S':
            warning('Labels for dataset %s are literal, should be numeric. '
                    'You might like to use labels_map argument.' % self)

    def _expandSampleAttribute(self, attr, attr_name):
        """If a sample attribute is given as a scalar expand/repeat it to a
        length matching the number of samples in the dataset.
        """
        try:
            # if we are initializing with a single string -- we should
            # treat it as a single label
            if isinstance(attr, basestring):
                raise TypeError
            if len(attr) != self.nsamples:
                raise DatasetError, \
                      "Length of sample attribute '%s' [%d]" \
                      % (attr_name, len(attr)) \
                      + " has to match the number of samples" \
                      + " [%d]." % self.nsamples
            # store the sequence as an array
            return N.array(attr)

        except TypeError:
            # make a sequence of an identical value matching the number
            # of samples
            return N.repeat(attr, self.nsamples)


    @classmethod
    def _registerAttribute(cls, key, dictname="_data", abbr=None,
                           hasunique=False):
        """Register an attribute for any Dataset class.

        Creates property assigning getters/setters depending on the
        availability of corresponding _get, _set functions.
        """
        classdict = cls.__dict__
        if not classdict.has_key(key):
            if __debug__:
                debug("DS", "Registering new attribute %s" % key)
            # define get function and use the corresponding _get<key>
            # if such is defined
            getter = '_get%s' % key
            if classdict.has_key(getter):
                getter = '%s.%s' % (cls.__name__, getter)
            else:
                getter = "lambda x: x.%s['%s']" % (dictname, key)

            # define set function and use the corresponding _set<key>
            # if such is defined
            setter = '_set%s' % key
            if classdict.has_key(setter):
                setter = '%s.%s' % (cls.__name__, setter)
            elif dictname=="_data":
                setter = "lambda self,x: self._setdataattr" + \
                         "(attrib='%s', value=x)" % (key)
            else:
                setter = None

            if __debug__:
                debug("DS", "Registering new property %s.%s" %
                      (cls.__name__, key))
            exec "%s.%s = property(fget=%s, fset=%s)" % \
                 (cls.__name__, key, getter, setter)

            if abbr is not None:
                exec "%s.%s = property(fget=%s, fset=%s)" % \
                     (cls.__name__, abbr, getter, setter)

            if hasunique:
                uniquekey = "unique%s" % key
                getter = '_get%s' % uniquekey
                if classdict.has_key(getter):
                    getter = '%s.%s' % (cls.__name__, getter)
                else:
                    getter = "lambda x: x._getuniqueattr" + \
                             "(attrib='%s', dict_=x.%s)" % (uniquekey, dictname)

                if __debug__:
                    debug("DS", "Registering new property %s.%s" %
                          (cls.__name__, uniquekey))

                exec "%s.%s = property(fget=%s)" % \
                     (cls.__name__, uniquekey, getter)
                if abbr is not None:
                    exec "%s.U%s = property(fget=%s)" % \
                         (cls.__name__, abbr, getter)

                # create samplesper<ATTR> properties
                sampleskey = "samplesper%s" % key[:-1]
                if __debug__:
                    debug("DS", "Registering new property %s.%s" %
                          (cls.__name__, sampleskey))

                exec "%s.%s = property(fget=%s)" % \
                     (cls.__name__, sampleskey,
                      "lambda x: x._getNSamplesPerAttr(attrib='%s')" % key)

                cls._uniqueattributes.append(uniquekey)

                # create idsby<ATTR> methods
                sampleskey = "idsby%s" % key
                if __debug__:
                    debug("DS", "Registering new property %s.%s" %
                          (cls.__name__, sampleskey))

                exec "%s.%s = %s" % (cls.__name__, sampleskey,
                                     "lambda self, x: " +
                                     "self._getSampleIdsByAttr(x,attrib='%s')" % key)

                cls._uniqueattributes.append(uniquekey)

            cls._registeredattributes.append(key)
        elif __debug__:
            warning('Trying to reregister attribute `%s`. For now ' % key +
                    'such capability is not present')


    def __str__(self):
        """String summary over the object
        """
        return self.summary(uniq=True,
                            idhash=__debug__ and ('DS_ID' in debug.active),
                            stats=__debug__ and ('DS_STATS' in debug.active),
                            lstats=__debug__ and ('DS_STATS' in debug.active))


    def __repr__(self):
        return "<%s>" % str(self)


    def summary(self, uniq=True, stats=True, idhash=False, lstats=True,
                maxc=30, maxl=20):
        """String summary over the object

        :Parameters:
          uniq : bool
            Include summary over data attributes which have unique values
          idhash : bool
            Include idhash value for dataset and samples
          stats : bool
            Include some basic statistics (mean, std, var) over dataset samples
          lstats : bool
            Include statistics on chunks/labels
          maxc : int
            Maximal number of chunks for which to provide details on
            labels/chunks
          maxl : int
            Maximal number of labels for which to provide details on
            labels/chunks
        """
        # local bindings
        samples = self.samples
        _data = self._data
        _dsattr = self._dsattr

        if idhash:
            idhash_ds = "{%s}" % self.idhash
            idhash_samples = "{%s}" % idhash_(samples)
        else:
            idhash_ds = ""
            idhash_samples = ""

        s = """Dataset %s/ %s %d%s x %d""" % \
            (idhash_ds, samples.dtype,
             self.nsamples, idhash_samples, self.nfeatures)

        ssep = (' ', '\n')[lstats]
        if uniq:
            s += "%suniq:" % ssep
            for uattr in _dsattr.keys():
                if not uattr.startswith("unique"):
                    continue
                attr = uattr[6:]
                try:
                    value = self._getuniqueattr(attrib=uattr,
                                                dict_=_data)
                    s += " %d %s" % (len(value), attr)
                except:
                    pass

        if isinstance(self.labels_map, dict):
            s += ' labels_mapped'

        if stats:
            # basic statistics over all samples
            s += "%sstats: mean=%g std=%g var=%g min=%g max=%g\n" % \
                 (ssep, N.mean(samples), N.std(samples),
                  N.var(samples), N.min(samples), N.max(samples))

        if lstats:
            s += self.summary_labels(maxc=maxc, maxl=maxl)

        return s


    def summary_labels(self, maxc=30, maxl=20):
        """Provide summary statistics over the labels and chunks

        :Parameters:
          maxc : int
            Maximal number of chunks for which to provide details
          maxl : int
            Maximal number of labels for which to provide details
        """
        # import locally to avoid circular imports at module load time
        from mvpa.datasets.miscfx import getSamplesPerChunkLabel
        spcl = getSamplesPerChunkLabel(self)

        ul = self.uniquelabels.tolist()
        uc = self.uniquechunks.tolist()
        s = ""
        if len(ul) < maxl and len(uc) < maxc:
            s += "\nCounts of labels in each chunk:"
            # labels are columns, chunks are rows
            table = [[' chunks\labels'] + ul]
            table += [[''] + ['---'] * len(ul)]
            for c, counts in zip(uc, spcl):
                table.append([ str(c) ] + counts.tolist())
            s += '\n' + table2string(table)
        else:
            s += "No details due to large number of labels or chunks. " \
                 "Increase maxc and maxl if desired"

        labels_map = self.labels_map
        if isinstance(labels_map, dict):
            s += "\nOriginal labels were mapped using following mapping:"
            s += '\n\t'+'\n\t'.join([':\t'.join(map(str, x))
                                     for x in labels_map.items()]) + '\n'

        def cl_stats(axis, u, name1, name2):
            """Compute statistics per label
            """
            stats = {'min': N.min(spcl, axis=axis),
                     'max': N.max(spcl, axis=axis),
                     'mean': N.mean(spcl, axis=axis),
                     'std': N.std(spcl, axis=axis),
                     '#%ss' % name2: N.sum(spcl>0, axis=axis)}
            entries = [' ' + name1, 'mean', 'std', 'min', 'max', '#%ss' % name2]
            table = [ entries ]
            for i, l in enumerate(u):
                d = {' ' + name1 : l}
                d.update(dict([ (k, stats[k][i]) for k in stats.keys()]))
                table.append( [ ('%.3g', '%s')[isinstance(d[e], basestring)]
                                % d[e] for e in entries] )
            return '\nSummary per %s across %ss\n' % (name1, name2) \
                   + table2string(table)

        if len(ul) < maxl:
            s += cl_stats(0, ul, 'label', 'chunk')
        if len(uc) < maxc:
            s += cl_stats(1, uc, 'chunk', 'label')
        return s


    def __iadd__(self, other):
        """Merge the samples of one Dataset object to another (in-place).

        No dataset attributes, besides labels_map, will be merged!
        Additionally, a new set of unique `origids` will be generated.
        """
        # local bindings
        _data = self._data
        other_data = other._data

        if not self.nfeatures == other.nfeatures:
            raise DatasetError, "Cannot add Dataset, because the number of " \
                                "features do not match."

        # take care about labels_map and labels
        slm = self.labels_map
        olm = other.labels_map
        if N.logical_xor(slm is None, olm is None):
            raise ValueError, "Cannot add datasets where only one of them " \
                  "has labels map assigned. If needed -- implement it"

        # concatenate all sample attributes
        for k, v in _data.iteritems():
            if k == 'origids':
                # special case samples origids: for now just regenerate
                # unique ones. Could also check if the concatenation is
                # unique, but it would be costly performance-wise
                _data[k] = N.arange(len(v) + len(other_data[k]))

            elif k == 'labels' and slm is not None:
                # special care about labels if mapping was in effect:
                # append the 2nd map to the 1st one and relabel the 2nd
                # dataset accordingly
                nlm = slm.copy()
                # figure out the next free numeric label
                nextid = N.sort(nlm.values())[-1] + 1
                olabels = other.labels
                olabels_remap = {}
                for ol, olnum in olm.iteritems():
                    if not nlm.has_key(ol):
                        # check if we can preserve the old numeric label;
                        # if not -- assign a new one not yet present in
                        # either dataset
                        if olnum in nlm.values():
                            nextid = N.sort(nlm.values() + olm.values())[-1] + 1
                        else:
                            nextid = olnum
                        olabels_remap[olnum] = nextid
                        nlm[ol] = nextid
                        nextid += 1
                    else:
                        olabels_remap[olnum] = nlm[ol]
                olabels = [olabels_remap[x] for x in olabels]
                # finally compose new labels and store the merged mapping
                _data['labels'] = N.concatenate((v, olabels), axis=0)
                self._dsattr['labels_map'] = nlm

                if __debug__:
                    # check that we are not dealing with colliding
                    # mappings, since that is problematic and might lead
                    # to various complications
                    if (len(Set(slm.keys())) != len(Set(slm.values()))) or \
                       (len(Set(olm.keys())) != len(Set(olm.values()))):
                        warning("Adding datasets where multiple labels "
                                "mapped to the same ID is not recommended. "
                                "Please check the outcome. Original mappings "
                                "were %s and %s. Resultant is %s"
                                % (slm, olm, nlm))

            else:
                _data[k] = N.concatenate((v, other_data[k]), axis=0)

        # the cached unique* values are invalid now
        self._resetallunique()

        return self


    def __add__(self, other):
        """Merge the samples of two Dataset objects.

        All data of both datasets is copied, concatenated and a new Dataset is
        returned.

        NOTE: This can be a costly operation (both memory and time). If
        performance is important consider the '+=' operator.
        """
        # create a new object of the same type it is now and NOT only Dataset
        out = super(Dataset, self).__new__(self.__class__)

        # now init it: to make it work all Dataset constructors have to accept
        # Class(data=Dict, dsattr=Dict)
        out.__init__(data=self._data,
                     dsattr=self._dsattr,
                     copy_samples=True,
                     copy_data=True,
                     copy_dsattr=True)

        out += other

        return out


    def copy(self, deep=True):
        """Create a copy (clone) of the dataset, by fully copying current one

        :Keywords:
          deep : bool
            deep flag is provided to __init__ for
            copy_{samples,data,dsattr}. By default full copy is done.
        """
        # create a new object of the same type it is now and NOT only Dataset
        out = super(Dataset, self).__new__(self.__class__)

        # now init it: to make it work all Dataset constructors have to accept
        # Class(data=Dict, dsattr=Dict)
        out.__init__(data=self._data,
                     dsattr=self._dsattr,
                     copy_samples=deep,
                     copy_data=deep,
                     copy_dsattr=deep)

        return out


    def selectFeatures(self, ids=None, sort=True, groups=None):
        """Select a number of features from the current set.

        :Parameters:
          ids
            iterable container to select ids
          sort : bool
            if to sort Ids. Order matters and `selectFeatures` assumes
            incremental order. If not such, in non-optimized code
            selectFeatures would verify the order and sort

        Returns a new Dataset object with a copy of corresponding features
        from the original samples array.

        WARNING: The order of ids determines the order of features in
        the returned dataset. This might be useful sometimes, but can
        also cause major headaches! Order is verified when
        running in non-optimized code (if __debug__)
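
        A minimal usage sketch (feature ids are illustrative)::

          ds_subset = dataset.selectFeatures([0, 2, 5])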
        """
        if ids is None and groups is None:
            raise ValueError, "No feature selection specified."

        # start with an empty list if no ids were specified (so just groups)
        if ids is None:
            ids = []

        if not groups is None:
            if not self._dsattr.has_key('featuregroups'):
                raise RuntimeError, \
                      "Dataset has no feature grouping information."

            for g in groups:
                ids += (self._dsattr['featuregroups'] == g).nonzero()[0].tolist()

        # sort on a copy, so the caller's list stays intact
        if sort:
            ids = copy.deepcopy(ids)
            ids.sort()
        elif __debug__ and 'CHECK_DS_SORTED' in debug.active:
            from mvpa.misc.support import isSorted
            if not isSorted(ids):
                warning("IDs for selectFeatures must be provided " +
                        "in sorted order, otherwise major headache might occur")

        # shallow-copy the data dict and select the features from samples
        new_data = self._data.copy()
        new_data['samples'] = self._data['samples'][:, ids]

        # apply the selection to feature groups as well, if present
        if self._dsattr.has_key('featuregroups'):
            new_dsattr = self._dsattr.copy()
            new_dsattr['featuregroups'] = self._dsattr['featuregroups'][ids]
        else:
            new_dsattr = self._dsattr

        # create a new object of the same type it is now and NOT only Dataset
        dataset = super(Dataset, self).__new__(self.__class__)

        # now init it: to make it work all Dataset constructors have to accept
        # Class(data=Dict, dsattr=Dict)
        dataset.__init__(data=new_data,
                         dsattr=new_dsattr,
                         check_data=False,
                         copy_samples=False,
                         copy_data=False,
                         copy_dsattr=False
                         )

        return dataset


    def applyMapper(self, featuresmapper=None, samplesmapper=None,
                    train=True):
        """Obtain new dataset by applying mappers over features and/or samples.

        While featuresmappers leave the sample attributes information
        unchanged, as the number of samples in the dataset is invariant,
        samplesmappers are also applied to the samples attributes themselves!

        Applying a featuresmapper will destroy any feature grouping information.

        :Parameters:
          featuresmapper : Mapper
            `Mapper` to somehow transform each sample's features
          samplesmapper : Mapper
            `Mapper` to transform each feature across samples
          train : bool
            Flag whether to train the mapper with this dataset before applying
            it.

        TODO: selectFeatures is pretty much
              applyMapper(featuresmapper=MaskMapper(...))
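
        A usage sketch, assuming ``some_mapper`` is an already constructed
        `Mapper` instance (the name is hypothetical)::

          ds_mapped = dataset.applyMapper(featuresmapper=some_mapper)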
        """
        # shallow-copy all stuff from the current data dict
        new_data = self._data.copy()

        # apply mappers
        if samplesmapper:
            if __debug__:
                debug("DS", "Training samplesmapper %s" % `samplesmapper`)
            samplesmapper.train(self)

            if __debug__:
                debug("DS", "Applying samplesmapper %s" % `samplesmapper` +
                      " to samples of dataset `%s`" % `self`)

            # get rid of existing 'origids' since they are not valid anymore
            # and applying a samples mapper could also change the total
            # number of samples
            if new_data.has_key('origids'):
                del(new_data['origids'])

            # apply the mapper to all sample-wise data in the dataset
            for k in new_data.keys():
                new_data[k] = samplesmapper.forward(self._data[k])

        # feature mapping might affect dataset attributes; start with the
        # original ones
        new_dsattr = self._dsattr

        if featuresmapper:
            if __debug__:
                debug("DS", "Training featuresmapper %s" % `featuresmapper`)
            featuresmapper.train(self)

            if __debug__:
                debug("DS", "Applying featuresmapper %s" % `featuresmapper` +
                      " to samples of dataset `%s`" % `self`)
            new_data['samples'] = featuresmapper.forward(self._data['samples'])

            # remove feature grouping, who knows what the mapper did to the
            # features
            if self._dsattr.has_key('featuregroups'):
                new_dsattr = self._dsattr.copy()
                del(new_dsattr['featuregroups'])
            else:
                new_dsattr = self._dsattr

        # create a new object of the same type it is now and NOT only Dataset
        dataset = super(Dataset, self).__new__(self.__class__)

        # now init it: to make it work all Dataset constructors have to accept
        # Class(data=Dict, dsattr=Dict)
        dataset.__init__(data=new_data,
                         dsattr=new_dsattr,
                         check_data=False,
                         copy_samples=False,
                         copy_data=False,
                         copy_dsattr=False
                         )

        # samples attributes might have changed after applying samplesmapper
        if samplesmapper:
            dataset._resetallunique(force=True)

        return dataset


    def selectSamples(self, ids):
        """Choose a subset of samples defined by samples IDs.

        Returns a new dataset object containing the selected sample
        subset.

        TODO: yoh, we might need to sort the mask if the mask is a
        list of ids and is not ordered. Clarify with Michael what is
        our intent here!
        """
        # a scalar id would strip the 2d layout of the masked samples
        # array, so wrap it into a list
        if not operator.isSequenceType( ids ):
            ids = [ids]

        # apply the selection to every sample attribute
        data = {}
        for k, v in self._data.iteritems():
            data[k] = v[ids, ]

        # create a new object of the same type it is now and NOT only Dataset
        dataset = super(Dataset, self).__new__(self.__class__)

        # now init it: to make it work all Dataset constructors have to accept
        # Class(data=Dict, dsattr=Dict)
        dataset.__init__(data=data,
                         dsattr=self._dsattr,
                         check_data=False,
                         copy_samples=False,
                         copy_data=False,
                         copy_dsattr=False)

        dataset._resetallunique(force=True)
        return dataset


    def index(self, *args, **kwargs):
        """Universal indexer to obtain indexes of interesting samples/features.
        See .select() for more information

        :Return: tuple of (samples indexes, features indexes). Each
          item could also be None, if no selection on samples or
          features was requested (to discriminate between no selected
          items, and no selections)
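
        A usage sketch (attribute values are illustrative)::

          s_ids, f_ids = dataset.index(labels=[1, 2], chunks=[2, 4])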
        """
        s_indx = []                     # selections for samples
        f_indx = []                     # selections for features
        return_dataset = kwargs.pop('return_dataset', False)
        largs = len(args)

        args = list(args)               # so we could override

        # figure out the number of positional selections for
        # samples/features: leading non-string arguments (or the
        # literal 'all')
        largs_nonstring = 0
        for i in xrange(largs):
            l = args[i]
            if isinstance(l, basestring):
                if l.lower() == 'all':
                    # 'all' means no selection
                    args[i] = slice(None)
                else:
                    break
            largs_nonstring += 1

        if largs_nonstring >= 1:
            s_indx.append(args[0])
            if __debug__ and 'CHECK_DS_SELECT' in debug.active:
                _validate_indexes_uniq_sorted(args[0], 'select', 'samples')
            if largs_nonstring == 2:
                f_indx.append(args[1])
                if __debug__ and 'CHECK_DS_SELECT' in debug.active:
                    _validate_indexes_uniq_sorted(args[1], 'select', 'features')
            elif largs_nonstring > 2:
                raise ValueError, "Only two positional arguments are allowed" \
                      ". 1st for samples, 2nd for features"

        # the remaining positional arguments must encode selections by
        # attribute as ('name', values) pairs
        if (largs - largs_nonstring) % 2 != 0:
            raise ValueError, "Positional selections must come in pairs:" \
                  " e.g. ('labels', [1,2,3])"

        for i in xrange(largs_nonstring, largs, 2):
            k, v = args[i:i+2]
            kwargs[k] = v

        # process keyword arguments
        data_ = self._data
        for k, v in kwargs.iteritems():
            if k == 'samples':
                s_indx.append(v)
            elif k == 'features':
                f_indx.append(v)
            elif data_.has_key(k):
                # so we have an attribute to select by
                if __debug__:
                    if not N.any([isinstance(v, cls) for cls in
                                  [list, tuple, slice, int]]):
                        raise ValueError, "Trying to specify selection for %s " \
                              "based on unsupported '%s'" % (k, v)
                s_indx.append(self._getSampleIdsByAttr(v, attrib=k, sort=False))
            else:
                raise ValueError, 'Keyword "%s" is not known, thus ' \
                      'select() failed' % k

        def combine_indexes(indx, nelements):
            """Helper function: intersect selections given in indx

            :Parameters:
              indxs : list of lists or slices
                selections of elements
              nelements : int
                number of elements total for deriving indexes from slices
            """
            indx_sel = None
            for s in indx:
                if isinstance(s, slice) or \
                   isinstance(s, N.ndarray) and s.dtype==bool:
                    # expand slices and boolean masks into explicit indexes
                    all_indexes = N.arange(nelements)
                    s = all_indexes[s]
                elif not operator.isSequenceType(s):
                    s = [ s ]

                if indx_sel is None:
                    indx_sel = Set(s)
                else:
                    # intersect with all previous selections
                    indx_sel = indx_sel.intersection(s)

            # if we got a Set -- convert
            if isinstance(indx_sel, Set):
                indx_sel = list(indx_sel)

            # sort for the sake of consistent behavior
            indx_sel.sort()

            return indx_sel

        # select samples
        if len(s_indx) == 1 and isinstance(s_indx[0], slice) \
               and s_indx[0] == slice(None):
            # so no actual selection for samples was requested
            s_indx = s_indx[0]
        else:
            # intersect among all selections, if any
            if len(s_indx) == 0:
                s_indx = None
            else:
                s_indx = combine_indexes(s_indx, self.nsamples)

        # select features
        if len(f_indx):
            f_indx = combine_indexes(f_indx, self.nfeatures)
        else:
            f_indx = None

        return s_indx, f_indx


    def select(self, *args, **kwargs):
        """Universal selector

        WARNING: if you need to select duplicate samples
        (e.g. samples=[5,5]) or the order of selected samples or features
        matters and should not be sorted (e.g. samples=[3,2,1]),
        please use selectFeatures or selectSamples functions directly

        Examples:
          Mimic plain selectSamples::

            dataset.select([1,2,3])
            dataset[[1,2,3]]

          Mimic plain selectFeatures::

            dataset.select(slice(None), [1,2,3])
            dataset.select('all', [1,2,3])
            dataset[:, [1,2,3]]

          Mixed (select features and samples)::

            dataset.select([1,2,3], [1, 2])
            dataset[[1,2,3], [1, 2]]

          Select samples matching some attributes::

            dataset.select(labels=[1,2], chunks=[2,4])
            dataset.select('labels', [1,2], 'chunks', [2,4])
            dataset['labels', [1,2], 'chunks', [2,4]]

          Mixed -- out of the first 100 samples, select only those with
          labels 1 or 2 and belonging to chunks 2 or 4, and select
          features 2 and 3::

            dataset.select(slice(0,100), [2,3], labels=[1,2], chunks=[2,4])
            dataset[:100, [2,3], 'labels', [1,2], 'chunks', [2,4]]

        """
        s_indx, f_indx = self.index(*args, **kwargs)

        # select samples
        if s_indx == slice(None):
            # so no actual selection was requested among samples --
            # proceed with the original dataset
            if __debug__:
                debug('DS', 'in select() not selecting samples')
            ds = self
        else:
            # else do selection
            if __debug__:
                debug('DS', 'in select() selecting samples given selections'
                      + str(s_indx))
            ds = self.selectSamples(s_indx)

        # select features
        if f_indx is not None:
            if __debug__:
                debug('DS', 'in select() selecting features given selections'
                      + str(f_indx))
            ds = ds.selectFeatures(f_indx)

        return ds


    def where(self, *args, **kwargs):
        """Obtain indexes of interesting samples/features. See select() for
        more information

        XXX somewhat obsoletes idsby...
        """
        s_indx, f_indx = self.index(*args, **kwargs)
        if s_indx is not None and f_indx is not None:
            return s_indx, f_indx
        elif s_indx is not None:
            return s_indx
        else:
            return f_indx


    def __getitem__(self, *args):
        """Convenience dataset parts selection

        See select for more information
        """
        # for cases like ``dataset[:, 1]`` args is a tuple within a tuple
        if len(args) == 1 and isinstance(args[0], tuple):
            args = args[0]

        # turn ['chunks':[2,3]]-style slice selections into the
        # ('chunks', [2,3]) pairs understood by select()
        args_, args = args, ()
        for a in args_:
            if isinstance(a, slice) and \
                   isinstance(a.start, basestring):
                if a.stop is None or a.step is not None:
                    raise ValueError, \
                          "Selection must look like ['chunks':[2,3]]"
                args += (a.start, a.stop)
            else:
                args += (a,)
        return self.select(*args)


    def permuteLabels(self, status, perchunk=True, assure_permute=False):
        """Permute the labels.

        TODO: rename status into something closer in semantics.

        :Parameters:
          status : bool
            Calling this method with True, the labels are permuted among
            all samples. If 'status' is False the original labels are
            restored.
          perchunk : bool
            If True permutation is limited to samples sharing the same
            chunk value. Therefore only the association of a certain
            sample with a label is permuted while keeping the absolute
            number of occurrences of each label value within a certain
            chunk constant.
          assure_permute : bool
            If True, assures that labels are permuted, i.e. any one is
            different from the original one
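
        A typical permutation-test sketch (the analysis step is left
        abstract)::

          dataset.permuteLabels(True, perchunk=True)
          # ... estimate chance-level performance on permuted labels ...
          dataset.permuteLabels(False)      # restore original labels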
        """
        # local bindings
        _data = self._data

        if len(self.uniquelabels)<2:
            raise RuntimeError, \
                  "Call to permuteLabels is bogus since there is insufficient" \
                  " number of labels: %s" % self.uniquelabels

        if not status:
            # restore originals
            if _data.get('origlabels', None) is None:
                raise RuntimeError, 'Cannot restore labels. ' \
                                    'permuteLabels() has never been ' \
                                    'called with status == True.'
            self.labels = _data['origlabels']
            _data.pop('origlabels')
        else:
            # store orig labels, but only if not yet done, otherwise
            # multiple calls with status == True would destroy the
            # original labels
            if not _data.has_key('origlabels') \
                   or _data['origlabels'] is None:
                # bind old labels to origlabels and operate on a copy
                _data['origlabels'] = _data['labels']
                _data['labels'] = copy.copy(_data['labels'])

            labels = _data['labels']
            # now scramble
            if perchunk:
                for o in self.uniquechunks:
                    labels[self.chunks == o] = \
                        N.random.permutation(labels[self.chunks == o])
            else:
                labels = N.random.permutation(labels)

            self.labels = labels

            if assure_permute:
                if not (_data['labels'] != _data['origlabels']).any():
                    if not (assure_permute is True):
                        if assure_permute == 1:
                            raise RuntimeError, \
                                  "Cannot assure permutation of labels %s for " \
                                  "some reason with chunks %s and while " \
                                  "perchunk=%s . Should not happen" % \
                                  (self.labels, self.chunks, perchunk)
                    else:
                        assure_permute = 11 # allow 10 more attempts
                    if __debug__:
                        debug("DS", "Recalling permute to assure different labels")
                    self.permuteLabels(status, perchunk=perchunk,
                                       assure_permute=assure_permute-1)


    def getRandomSamples(self, nperlabel):
        """Select a random set of samples.

        If 'nperlabel' is an integer value, the specified number of samples is
        randomly chosen from the group of samples sharing a unique label
        value (total number of selected samples: nperlabel x len(uniquelabels)).

        If 'nperlabel' is a list, its length has to match the number of
        unique label values. In this case 'nperlabel' specifies the number of
        samples that shall be selected from the samples with the corresponding
        label.

        The method returns a Dataset object containing the selected
        samples.
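
        A balancing sketch (the per-label count is illustrative)::

          balanced = dataset.getRandomSamples(10)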
        """
        # if an integer is given take this value for all classes
        if isinstance(nperlabel, int):
            nperlabel = [ nperlabel for i in self.uniquelabels ]

        sample = []
        # for each available class
        labels = self.labels
        for i, r in enumerate(self.uniquelabels):
            # get the list of pattern ids for this class
            sample += random.sample( (labels == r).nonzero()[0],
                                     nperlabel[i] )

        return self.selectSamples( sample )


    def getNSamples( self ):
        """Currently available number of patterns.
        """
        return self._data['samples'].shape[0]


    def getNFeatures( self ):
        """Number of features per pattern.
        """
        return self._data['samples'].shape[1]


    def getLabelsMap(self):
        """Stored labels map (if any)
        """
        return self._dsattr.get('labels_map', None)


    def setLabelsMap(self, lm):
        """Set labels map.

        Checks for the validity of the mapping -- values should cover
        all existing labels in the dataset
        """
        values = Set(lm.values())
        labels = Set(self.uniquelabels)
        if not values.issuperset(labels):
            raise ValueError, \
                  "Provided mapping %s has some existing labels (out of %s) " \
                  "missing from mapping" % (list(values), list(labels))
        self._dsattr['labels_map'] = lm


    def setSamplesDType(self, dtype):
        """Set the data type of the samples array.
        """
        # local binding
        _data = self._data

        if _data['samples'].dtype != dtype:
            _data['samples'] = _data['samples'].astype(dtype)


    def defineFeatureGroups(self, definition):
        """Assign `definition` to featuregroups

        XXX Feature-groups was not finished to be useful
        """
        if not len(definition) == self.nfeatures:
            raise ValueError, \
                  "Length of feature group definition %i " \
                  "does not match the number of features %i " \
                  % (len(definition), self.nfeatures)

        self._dsattr['featuregroups'] = N.array(definition)


    def convertFeatureIds2FeatureMask(self, ids):
        """Returns a boolean mask with all features in `ids` selected.

        :Parameters:
          ids : list or 1d array
            To be selected features ids.

        :Returns:
          ndarray : dtype='bool'
            All selected features are set to True; False otherwise.
        """
        fmask = N.repeat(False, self.nfeatures)
        fmask[ids] = True

        return fmask


    def convertFeatureMask2FeatureIds(self, mask):
        """Returns feature ids corresponding to non-zero elements in the mask.

        :Parameters:
          mask : 1d ndarray
            Feature mask.

        :Returns:
          ndarray : integer
            Ids of non-zero (non-False) mask elements.
        """
        return mask.nonzero()[0]


    @staticmethod
    def _checkCopyConstructorArgs(**kwargs):
        """Common sanity check for Dataset copy constructor calls."""
        # enforce the presence of a 2d samples array
        samples = None
        if kwargs.has_key('samples'):
            samples = kwargs['samples']
        if samples is None and kwargs.has_key('data') \
           and kwargs['data'].has_key('samples'):
            # somebody gave us a data dictionary with samples
            samples = kwargs['data']['samples']
        if samples is None:
            raise DatasetError, \
                  "`samples` must be provided to copy constructor call."

        if not len(samples.shape) == 2:
            raise DatasetError, \
                  "samples must be in 2D shape in copy constructor call."


    nsamples = property( fget=getNSamples )
    nfeatures = property( fget=getNFeatures )
    labels_map = property( fget=getLabelsMap, fset=setLabelsMap )


def datasetmethod(func):
    """Decorator to easily bind functions to a Dataset class
    """
    if __debug__:
        debug("DS_", "Binding function %s to Dataset class" % func.func_name)

    # bind the function to the Dataset class
    setattr(Dataset, func.func_name, func)

    # return the original function
    return func
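
# An illustrative (hypothetical) use of the decorator: after a definition
# like the one sketched below, ``some_dataset.zscoreme()`` would become
# available on any Dataset instance.
#
#   @datasetmethod
#   def zscoreme(dataset):
#       ...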


# register the basic sample attributes with the Dataset class
Dataset._registerAttribute("samples", "_data", abbr='S', hasunique=False)
Dataset._registerAttribute("labels", "_data", abbr='L', hasunique=True)
Dataset._registerAttribute("chunks", "_data", abbr='C', hasunique=True)
# samples ids (already unique by definition)
Dataset._registerAttribute("origids", "_data", abbr='I', hasunique=False)
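
# After registration the following conveniences exist (an illustrative,
# non-exhaustive sketch):
#
#   dataset.L                     # abbreviation for dataset.labels
#   dataset.uniquelabels          # unique label values (cached)
#   dataset.UC                    # abbreviation for dataset.uniquechunks
#   dataset.samplesperlabel       # dict: label -> number of samples
#   dataset.idsbylabels([1, 2])   # sample ids for the given labels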


from mvpa.misc.state import ClassWithCollections, Collection
from mvpa.misc.attributes import SampleAttribute, FeatureAttribute, \
     DatasetAttribute


class _Dataset(ClassWithCollections):
    """The successor of Dataset.
    """
    # placeholders for the attribute collections; populated at
    # construction time
    sa = None
    fa = None
    dsa = None

    # the samples array itself
    samples = None

    def __init__(self, samples, sa=None, fa=None, dsa=None):
        """
        This is the generic internal constructor. Its main task is to allow
        for a maximum level of customization during dataset construction,
        including fast copy construction.

        Parameters
        ----------
        samples : ndarray
          Data samples.
        sa : Collection
          Samples attributes collection.
        fa : Collection
          Features attributes collection.
        dsa : Collection
          Dataset attributes collection.
        """
        ClassWithCollections.__init__(self)

        # Internal constructor -- no quality checks are performed on the
        # given samples array here
        self.samples = samples

        # copy the provided attributes into the corresponding target
        # collections
        for scol, tcol in ((sa, self.sa),
                           (fa, self.fa),
                           (dsa, self.dsa)):
            # make sure we have a target collection to work with
            if tcol is None:
                tcol = Collection(owner=self)

            if not scol is None:
                for name, attr in scol.items.iteritems():
                    # store a copy so the source collection stays untouched
                    tcol.add(copy.copy(attr))


    @classmethod
    def from_basic(klass, samples, labels=None, chunks=None):
        """
        One line summary.

        Long description.

        Parameters
        ----------
        samples : ndarray
          The two-dimensional samples matrix.
        labels : ndarray
        chunks : ndarray

        Returns
        -------
        blah blah

        Notes
        -----
        blah blah

        See Also
        --------
        blah blah

        Examples
        --------
        blah blah
        """
        # compile the necessary samples attributes collection
        labels_ = SampleAttribute(name='labels')
        labels_.value = labels
        chunks_ = SampleAttribute(name='chunks')
        chunks_.value = chunks

        sa = Collection(items={'labels': labels_, 'chunks': chunks_})

        # and finally construct the dataset
        return klass(samples, sa=sa)


    def get_nsamples(self):
        """Currently available number of patterns.
        """
        return self.samples.shape[0]


    def get_nfeatures(self):
        """Number of features per pattern.
        """
        return self.samples.shape[1]