Python source code examples: sklearn.decomposition.FastICA()
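Before the examples, here is a minimal, self-contained sketch of the typical FastICA workflow (illustrative only, not drawn from any example below): two mixed signals are separated back into independent sources.

import numpy as np
from sklearn.decomposition import FastICA

rng = np.random.RandomState(0)
t = np.linspace(0, 8, 2000)
s1 = np.sin(2 * t)                            # source 1: sinusoid
s2 = np.sign(np.sin(3 * t))                   # source 2: square wave
S = np.c_[s1, s2] + 0.1 * rng.standard_normal((2000, 2))
A = np.array([[1.0, 0.5], [0.5, 2.0]])        # mixing matrix
X = S @ A.T                                   # observed mixtures

ica = FastICA(n_components=2, random_state=0)
S_est = ica.fit_transform(X)                  # recovered sources, shape (2000, 2)
A_est = ica.mixing_                           # estimated mixing matrix, shape (2, 2)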

Example 1
def fit_fastICA(data):
    '''
        Fit the model with FastICA independent components
    '''
    # keyword parameters for the FastICA estimator
    kwrd_params = {
        'n_components': 5, 
        'algorithm': 'parallel', 
        'whiten': True
    }

    # reduce the data
    reduced = reduceDimensions(cd.FastICA, 
        data, **kwrd_params)

    # prepare the data for the classifier
    data_l = prepare_data(data, reduced, 
        kwrd_params['n_components'])

    # fit the model
    class_fit_predict_print(data_l) 
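The helpers reduceDimensions, prepare_data and class_fit_predict_print are defined elsewhere in the original source, and cd is presumably an alias for sklearn.decomposition. Judging from the call above, a hypothetical reconstruction of reduceDimensions might look like this:

def reduceDimensions(method, data, **kwrd_params):
    # hypothetical sketch: instantiate the estimator class with the given
    # keyword parameters and return the reduced data
    reducer = method(**kwrd_params)    # e.g. cd.FastICA(n_components=5, ...)
    return reducer.fit_transform(data)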
Example 2
def dim_reduction_method(self):
        """
        select dimensionality reduction method
        """
        if self.dim_reduction == 'pca':
            return PCA()
        elif self.dim_reduction == 'factor-analysis':
            return FactorAnalysis()
        elif self.dim_reduction == 'fast-ica':
            return FastICA()
        elif self.dim_reduction == 'kernel-pca':
            return KernelPCA()
        elif self.dim_reduction == 'sparse-pca':
            return SparsePCA()
        elif self.dim_reduction == 'truncated-svd':
            return TruncatedSVD()
        elif self.dim_reduction is not None:
            raise ValueError('%s is not a supported dimensionality reduction method. Valid inputs are: '
                             '"pca","factor-analysis","fast-ica","kernel-pca","sparse-pca","truncated-svd".'
                             % self.dim_reduction)
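The enclosing class is not shown, so as a hedged usage sketch, treat the method above as a plain function and bind it to a minimal stand-in object:

import types
import numpy as np

# stand-in for the (unshown) object carrying the dim_reduction attribute
host = types.SimpleNamespace(dim_reduction='fast-ica')
estimator = dim_reduction_method(host)                   # -> FastICA()
X_new = estimator.fit_transform(np.random.rand(100, 8))  # use like any transformer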
Example 3
def test_objectmapper(self):
        df = pdml.ModelFrame([])
        self.assertIs(df.decomposition.PCA, decomposition.PCA)
        self.assertIs(df.decomposition.IncrementalPCA,
                      decomposition.IncrementalPCA)
        self.assertIs(df.decomposition.KernelPCA, decomposition.KernelPCA)
        self.assertIs(df.decomposition.FactorAnalysis,
                      decomposition.FactorAnalysis)
        self.assertIs(df.decomposition.FastICA, decomposition.FastICA)
        self.assertIs(df.decomposition.TruncatedSVD, decomposition.TruncatedSVD)
        self.assertIs(df.decomposition.NMF, decomposition.NMF)
        self.assertIs(df.decomposition.SparsePCA, decomposition.SparsePCA)
        self.assertIs(df.decomposition.MiniBatchSparsePCA,
                      decomposition.MiniBatchSparsePCA)
        self.assertIs(df.decomposition.SparseCoder, decomposition.SparseCoder)
        self.assertIs(df.decomposition.DictionaryLearning,
                      decomposition.DictionaryLearning)
        self.assertIs(df.decomposition.MiniBatchDictionaryLearning,
                      decomposition.MiniBatchDictionaryLearning)

        self.assertIs(df.decomposition.LatentDirichletAllocation,
                      decomposition.LatentDirichletAllocation) 
Example 4
def ICA(data, dim):
    # reduce data to `dim` independent components with FastICA
    ica = FastICA(n_components=dim)
    ica.fit(data)
    return ica.transform(data)
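For illustration, calling the helper above on random data:

import numpy as np

X = np.random.RandomState(0).rand(100, 10)   # 100 samples, 10 features
X_low = ICA(X, dim=3)                        # reduced to shape (100, 3)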
Example 5
def bsoid_umap_embed(f_10fps_sc, umap_params=UMAP_PARAMS):
    """
    Trains UMAP (unsupervised) given a set of features based on (x,y) positions
    :param f_10fps_sc: 2D array, standardized/session features
    :param umap_params: dict, UMAP params in GLOBAL_CONFIG
    :return trained_umap: object, trained UMAP transformer
    :return umap_embeddings: 2D array, embedded UMAP space
    """
    ###### So far, use of PCA is not necessary. If, however, features go beyond 100, consider taking top 50 PCs #####
    # if f_10fps_sc.shape[0] > 50:
    #     logging.info('Compressing {} instances from {} D '
    #                  'into {} D using PCA'.format(f_10fps_sc.shape[1], f_10fps_sc.shape[0],
    #                                               50))
    #     feats_train = PCA(n_components=50, random_state=23).fit_transform(f_10fps_sc.T)
    #     pca = PCA(n_components=50).fit(f_10fps_sc.T)
    #     logging.info('Done linear transformation with PCA.')
    #     logging.info('The top {} Principal Components '
    #                  'explained {}% variance'.format(50, 100 * np.sum(pca.explained_variance_ratio_)))
    ################ FastICA potentially useful for demixing signal ################
    # lowd_feats = FastICA(n_components=10, random_state=23).fit_transform(f_10fps.T)
    # feats_train = lowd_feats
    feats_train = f_10fps_sc.T
    logging.info('Transforming all {} instances from {} D into {} D'.format(feats_train.shape[0],
                                                                            feats_train.shape[1],
                                                                            umap_params.get('n_components')))
    trained_umap = umap.UMAP(n_neighbors=int(round(np.sqrt(feats_train.shape[0]))),  # power law
                             **umap_params).fit(feats_train)
    umap_embeddings = trained_umap.embedding_
    logging.info('Done non-linear transformation with UMAP from {} D into {} D.'.format(feats_train.shape[1],
                                                                                        umap_embeddings.shape[1]))
    return trained_umap, umap_embeddings 
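UMAP_PARAMS is imported from the project's configuration and is not shown here. Judging from how it is unpacked into umap.UMAP(), a plausible shape is sketched below (values are assumptions, for illustration only); note that n_neighbors must not appear in the dict, since the function passes it separately.

UMAP_PARAMS = {            # assumed values, not the project's actual config
    'n_components': 3,
    'min_dist': 0.0,
    'random_state': 23,
}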
Example 6
def __init__(self, **kwargs):
        super().__init__()
        # forward all keyword arguments to FastICA
        # (sk_d is presumably an alias for sklearn.decomposition)
        self.estimator = sk_d.FastICA(**kwargs)
Example 7
def calculate(self, n_components: Optional[int] = 4, **kwargs) -> Tuple[np.ndarray, np.ndarray]:
        if n_components is None:
            n_components = self.X.shape[-1]

        mdl = FastICA(n_components=n_components, **kwargs)
        # unfold the spatial axes, fit ICA, then fold the component maps
        # back into the image shape
        self.ims = mdl.fit_transform(self.X.collapse()).reshape(self.X.data.shape[:-1] + (n_components,))
        self.spcs = mdl.components_.transpose()

        return self.ims, self.spcs 
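self.X and its collapse() method belong to the project's own container class. A hedged sketch of the same idea with plain NumPy arrays: for a cube of shape (h, w, bands), flatten the spatial axes, fit FastICA, and fold the component maps back.

import numpy as np
from sklearn.decomposition import FastICA

cube = np.random.RandomState(0).rand(32, 32, 16)   # toy (h, w, bands) data
flat = cube.reshape(-1, cube.shape[-1])            # (1024, 16)
mdl = FastICA(n_components=4, random_state=0)
ims = mdl.fit_transform(flat).reshape(32, 32, 4)   # component images
spcs = mdl.components_.T                           # (bands, n_components) spectra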
Example 8
def img2_coord(img, init=None):
    # estimate the centroid, size, aspect ratio and rotation of a blob in a
    # [0, 1]-valued image, using FastICA to recover its principal axes

    assert np.max(img) <= 1.0

    if init is None:
        init = np.zeros((img.shape[0] + 200, img.shape[1] + 200))

    init[100:-100, 100:-100] = img

    img = init

    img_size = img.shape[0]

    tile_x = np.tile(np.arange(img_size), (img_size, 1))
    tile_y = tile_x.T

    mean_x = np.sum(img * tile_x) / np.sum(img)
    mean_y = np.sum(img * tile_y) / np.sum(img)

    dist_mean_x = np.abs(mean_x - tile_x) * img
    dist_mean_y = np.abs(mean_y - tile_y) * img

    hypo = np.max(((dist_mean_x * dist_mean_x) + (dist_mean_y * dist_mean_y)))

    diff_mean_x = tile_x[img > 0].flatten() - mean_x
    diff_mean_y = tile_y[img > 0].flatten() - mean_y

    m = np.stack([diff_mean_x, diff_mean_y])

    decomposer = FastICA(n_components=2)
    decomposer.fit(m.T)
    Uica = decomposer.mixing_

    # print('ICA vectors')
    norms = np.sqrt((Uica ** 2).sum(axis=0))
    Uica = Uica / norms
    if norms[0] > norms[1]:
        rotate = -np.arctan2(Uica[0, 0], Uica[1, 0])
    else:
        rotate = -np.arctan2(Uica[0, 1], Uica[1, 1])

    # fold the angle into [-math.pi / 2, math.pi / 2]
    if rotate < -math.pi / 2:
        rotate += math.pi
    elif rotate > math.pi / 2:
        rotate -= math.pi

    # print('rotate: {:.2f} [deg]'.format(rotate * 360 / 2 / 3.14))

    aspect_ratio = max(norms) / min(norms)
    # print('aspect ratio: {:.2f}'.format(aspect_ratio))

    width = np.sqrt(hypo / (1 + aspect_ratio**2)) * 2 + 0.25
    # height = np.sqrt(hypo * aspect_ratio**2 / (1 + aspect_ratio**2)) * 2
    height = width * aspect_ratio

    # print('width: {} height: {}'.format(width, height))

    return mean_x, mean_y, height, aspect_ratio, rotate, img_size
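An illustrative call with a synthetic elongated blob (pixel values in [0, 1]), assuming numpy, math and FastICA are imported as the function body requires:

import math
import numpy as np
from sklearn.decomposition import FastICA

img = np.zeros((64, 64))
img[28:36, 16:48] = 1.0                       # a wide horizontal bar
mean_x, mean_y, height, ar, rot, size = img2_coord(img)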