feature_extraction.text.TfidfTransformer()

class sklearn.feature_extraction.text.TfidfTransformer(norm=u'l2', use_idf=True, smooth_idf=True, sublinear_tf=False)

2017-01-15 04:22:10
sklearn.feature_extraction.image.extract_patches_2d()

sklearn.feature_extraction.image.extract_patches_2d(image, patch_size, max_patches=None, random_state=None)

2017-01-15 04:26:03
feature_extraction.text.CountVectorizer()

class sklearn.feature_extraction.text.CountVectorizer(input=u'content', encoding=u'utf-8', decode_error=u'strict', ...)

2017-01-15 04:22:08
sklearn.feature_extraction.image.reconstruct_from_patches_2d()

sklearn.feature_extraction.image.reconstruct_from_patches_2d(patches, image_size)

2017-01-15 04:26:05
feature_extraction.image.PatchExtractor()

class sklearn.feature_extraction.image.PatchExtractor(patch_size=None, max_patches=None, random_state=None)

2017-01-15 04:22:07
sklearn.feature_extraction.image.grid_to_graph()

sklearn.feature_extraction.image.grid_to_graph(n_x, n_y, n_z=1, mask=None, return_as=<class 'scipy.sparse.coo.coo_matrix'>, dtype=<class 'numpy.int64'>)

2017-01-15 04:26:04
feature_extraction.text.TfidfVectorizer()

class sklearn.feature_extraction.text.TfidfVectorizer(input=u'content', encoding=u'utf-8', decode_error=u'strict', ...)

2017-01-15 04:22:11
feature_extraction.text.HashingVectorizer()

class sklearn.feature_extraction.text.HashingVectorizer(input=u'content', encoding=u'utf-8', decode_error=u'strict', ...)

2017-01-15 04:22:09
feature_extraction.FeatureHasher()

class sklearn.feature_extraction.FeatureHasher(n_features=1048576, input_type='dict', dtype=<class 'numpy.float64'>, non_negative=False)

2017-01-15 04:22:06
sklearn.feature_extraction.image.img_to_graph()

sklearn.feature_extraction.image.img_to_graph(img, mask=None, return_as=<class 'scipy.sparse.coo.coo_matrix'>, dtype=None)

2017-01-15 04:26:04