Example #1
def tucker_on_error_tensor(error_tensor, ranks=[15, 4, 2, 2, 8, 15], save_results=False, verbose=False):
    """Impute the missing entries of `error_tensor` by alternating between a Tucker
    fit and re-filling the unobserved entries with the reconstruction (EM-style)."""
    tensor_pred = np.nan_to_num(error_tensor)
    tensor_from_fac = np.zeros(error_tensor.shape)
    errors = []
    num_iterations = 0
    Ω = get_omega(error_tensor)  # binary mask of observed entries

    # Iterate until the error on observed entries stops improving by more than 0.01
    while len(errors) <= 2 or errors[-1] < errors[-2] - 0.01:
        num_iterations += 1
        core, factors = tucker(tensor_pred, ranks=ranks)
        tensor_from_fac = tucker_to_tensor((core, factors))
        error = np.linalg.norm(np.multiply(Ω, np.nan_to_num(error_tensor - tensor_from_fac)))

        if verbose and num_iterations % 5 == 0:
            print("ranks: {}, iteration {}, error: {}".format(ranks, num_iterations, error))

        errors.append(error)
        # Keep observed entries, fill the missing ones with the current reconstruction
        tensor_pred = np.nan_to_num(error_tensor) + np.multiply(1 - Ω, tensor_from_fac)

    core, factors = tucker(tensor_pred, ranks=ranks)

    if save_results:
        np.save(os.path.join(defaults_path, 'error_tensor_imputed.npy'), tensor_pred)
    return core, factors, tensor_pred, errors
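A minimal usage sketch for the function above, assuming `get_omega` returns a binary mask that is 1 where an entry is observed (the helper and `defaults_path` are not shown in the example) and an older tensorly release whose `tucker` accepts the `ranks=` keyword; names and shapes here are illustrative:

import numpy as np
from tensorly.decomposition import tucker
from tensorly import tucker_to_tensor

def get_omega(t):
    # assumed contract: 1.0 where observed, 0.0 where missing
    return (~np.isnan(t)).astype(float)

tensor = np.random.rand(15, 4, 2, 2, 8, 15)
tensor[np.random.rand(*tensor.shape) < 0.1] = np.nan  # hide 10% of entries
core, factors, imputed, errors = tucker_on_error_tensor(tensor, verbose=True)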
Example #2
def MVspeClust(X, Y, k, nnr):
    # Multi-view spectral clustering: build the multi-view kernel tensor,
    # take the first Tucker factor, and cluster its rows with k-means.
    K = MVkernelize(X, Y, nnr)
    _, factors = tucker(K, ranks=[k, k, k])
    return KMeans(n_clusters=k).fit(factors[0]).labels_
Example #3
def factorize_dense_tk(tensor, params):
    input_shape, output_shape, ranks = (params["input_shape"],
                                        params["output_shape"],
                                        params["ranks"])

    shape = tensor.shape
    assert len(shape) == 2, "The input tensor should be 2nd-order."

    input_order, output_order = len(input_shape), len(output_shape)
    assert input_order + output_order == len(ranks), \
        "The length of ranks should equal the sum of the lengths of input_shape and output_shape."

    assert shape[0] == np.prod(input_shape), \
        "The product of input_shape should equal the first dimension of the input tensor."
    assert shape[1] == np.prod(output_shape), \
        "The product of output_shape should equal the second dimension of the input tensor."

    # Reshape the matrix into a higher-order tensor, then Tucker-decompose it
    tensor = np.reshape(tensor, input_shape + output_shape)
    core_factor, factors = tucker(tensor, ranks)

    input_factors, output_factors = factors[:input_order], factors[input_order:]
    for l in range(output_order):
        output_factors[l] = np.transpose(output_factors[l])

    return [input_factors, core_factor, output_factors]
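For instance, the factorization above can be applied to a small dense weight matrix as follows (a sketch; the shapes and ranks are illustrative, and `tucker` is assumed to accept the rank list positionally):

import numpy as np
from tensorly.decomposition import tucker

W = np.random.randn(8 * 8, 4 * 4)  # a 64 x 16 dense weight matrix
params = {"input_shape": (8, 8), "output_shape": (4, 4), "ranks": (4, 4, 2, 2)}
input_factors, core, output_factors = factorize_dense_tk(W, params)
print(core.shape)               # (4, 4, 2, 2), the ranks
print(output_factors[0].shape)  # (2, 4): output factors come back transposed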
Example #4
def select_filters(model, valid_loader, valid_set, remove_amount, device):
    """
    worst : list of the highest-divergence filters (worst filters) across batches;
            a top-k can be selected afterwards.
    imp   : divergences from the tensor-decomposition reconstruction;
            lower means the filter is more important.
    """
    worst = []
    model.eval()
    for i, data in tqdm(enumerate(valid_loader),
                        total=len(valid_set) // valid_loader.batch_size):
        out, y = data
        out = out.to(device)
        # Run only the first child module to get the first layer's activations
        for j, (name, param) in enumerate(model.named_children()):
            out = param(out)
            if j == 0:
                break
        nout = out.detach()

        # Reconstruct the activations from a rank-15 Tucker model; filters whose
        # activations diverge most from the reconstruction are flagged as worst.
        cp = dc.tucker(nout, 15)
        pred = tl.tucker_tensor.tucker_to_tensor(cp)
        dist = torch.cdist(pred, nout)
        importance = torch.mean(dist, dim=[0, 2, 3])
        _, w = torch.topk(importance, remove_amount)
        worst.append(w)

        # A quarter of the validation set is enough for a stable estimate
        if i == (len(valid_set) // valid_loader.batch_size) // 4:
            break
    return worst
Example #5
    def recover(self):
        '''
        Obtain the recovered tensor X_hat, the core, and the arm tensors from the
        sketches, using the two-pass sketching algorithm.
        '''
        # Get an orthogonal basis for each arm sketch
        Qs = []
        for sketch in self.arm_sketches:
            Q, _ = np.linalg.qr(sketch)
            Qs.append(Q)

        # Project X onto the bases to get the (smaller) core, then run Tucker on it
        core_tensor = self.X
        N = len(self.X.shape)
        for mode_n in range(N):
            Q = Qs[mode_n]
            core_tensor = tl.tenalg.mode_dot(core_tensor, Q.T, mode=mode_n)
        core_tensor, factors = tucker(core_tensor, ranks=self.ranks)
        self.core_tensor = core_tensor

        # arms[n] = Q_n @ factors[n]
        for n in range(len(factors)):
            self.arms.append(np.dot(Qs[n], factors[n]))
        X_hat = tl.tucker_to_tensor(self.core_tensor, self.arms)
        return X_hat, self.arms, self.core_tensor
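The two-pass idea above can be checked standalone: project a tensor onto per-mode orthogonal bases, run Tucker on the small core, then lift the factors back through the bases. A minimal sketch with random sketching matrices standing in for `self.arm_sketches`, assuming a recent tensorly where `tucker` takes `rank=` and `tucker_to_tensor` takes a (core, factors) tuple:

import numpy as np
import tensorly as tl
from tensorly.decomposition import tucker

X = np.random.randn(20, 30, 40)
ranks = [5, 5, 5]
# Orthogonal bases from QR of random "arm sketches"
Qs = [np.linalg.qr(np.random.randn(dim, 2 * r))[0] for dim, r in zip(X.shape, ranks)]
small = X
for n, Q in enumerate(Qs):
    small = tl.tenalg.mode_dot(small, Q.T, mode=n)  # compress mode n
core, factors = tucker(small, rank=ranks)
arms = [Q @ f for Q, f in zip(Qs, factors)]          # lift factors back
X_hat = tl.tucker_to_tensor((core, arms))
print(X_hat.shape)  # (20, 30, 40)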
Example #6
    def fit(self, print_error=False, f=1):
        """Fit the model according to the ITA update rule."""
        # Initialize U and D from a Tucker decomposition of the first slice
        _, self.Ulist = tucker(self.t[:, :, :, 0], rank=[self.r1, self.r2, self.r3])
        self.Dlist = [np.zeros(self.r1), np.zeros(self.r2), np.zeros(self.r3)]

        self.reconerror = []
        self.error = []

        for i in range(self.T):
            # Run the ITA update on each mode
            X = self.t[:, :, :, i]
            for j in range(3):
                self.ita(self.Ulist, self.Dlist, j, X, min(i + 1, 200), forget=f)

            # Reconstruct and record the error
            temp = tl.tenalg.multi_mode_dot(X, [u.T for u in self.Ulist])
            that = tl.tenalg.multi_mode_dot(temp, [u for u in self.Ulist])
            emat = that - X

            self.reconerror.append(np.linalg.norm(emat) ** 2)
            self.error.append(self.reconerror[-1] / np.linalg.norm(X) ** 2)

            if print_error:
                print(self.reconerror[-1])

        self.meanerr = np.mean(self.error)
Example #7
    def recover(self):
        '''
        Obtain the recovered tensor X_hat, the core, and the arm tensors from the
        sketches, using the one-pass sketching algorithm.
        '''
        phis = self.get_phis() if not self.phis else self.phis
        Qs = []
        for arm_sketch in self.arm_sketches:
            Q, _ = np.linalg.qr(arm_sketch)
            Qs.append(Q)
        # Undo the core sketching: multiply each mode by pinv(phi_n @ Q_n)
        self.core_tensor = self.core_sketch
        dim = len(self.tensor_shape)
        for mode_n in range(dim):
            self.core_tensor = tl.tenalg.mode_dot(
                self.core_tensor,
                np.linalg.pinv(np.dot(phis[mode_n], Qs[mode_n])), mode=mode_n)
        core_tensor, factors = tucker(self.core_tensor, ranks=self.ranks)
        self.core_tensor = core_tensor
        for n in range(dim):
            self.arms.append(np.dot(Qs[n], factors[n]))
        X_hat = tl.tucker_to_tensor(self.core_tensor, self.arms)
        return X_hat, self.arms, self.core_tensor
Example #8
    def __init__(self, layer, ranks, init=True):
        """
        Replace a dense nn.Linear layer with three smaller linears obtained
        from a rank-(out_rank, in_rank) Tucker decomposition of its weight.
        """
        super(DecomposedLinear, self).__init__()

        device = layer.weight.device
        weight = layer.weight.data
        out_dim, in_dim = weight.shape
        out_rank, in_rank = ranks

        # Chain: in_dim -> in_rank -> out_rank -> out_dim
        self.in_layer = nn.Linear(in_features=in_dim,
                                  out_features=in_rank,
                                  bias=False).to(device)

        self.core_layer = nn.Linear(in_features=in_rank,
                                    out_features=out_rank,
                                    bias=False).to(device)

        self.out_layer = nn.Linear(in_features=out_rank,
                                   out_features=out_dim,
                                   bias=layer.bias is not None).to(device)

        if init:
            core, factors = decomp.tucker(weight, ranks=ranks, init='svd')
            out_factor, in_factor = factors

            if self.out_layer.bias is not None:
                self.out_layer.bias.data = layer.bias.data

            self.in_layer.weight.data = torch.transpose(in_factor, 1, 0)
            self.out_layer.weight.data = out_factor
            self.core_layer.weight.data = core
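A hedged usage sketch for DecomposedLinear, assuming its forward pass chains in_layer -> core_layer -> out_layer, that `decomp` is tensorly.decomposition, and an older tensorly whose `tucker` accepts `ranks=`, with the pytorch backend enabled:

import torch
import torch.nn as nn
import tensorly as tl

tl.set_backend('pytorch')
fc = nn.Linear(512, 256)
dec = DecomposedLinear(fc, ranks=(64, 128))  # (out_rank, in_rank)
x = torch.randn(8, 512)
y = dec.out_layer(dec.core_layer(dec.in_layer(x)))
# y approximates fc(x) only as well as the weight is low-rank
print(y.shape, torch.dist(y, fc(x)))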
Example #9
def get_decomposed_tensor(i, obs_dir, ranks):
    # TF1-style session setup; the decomposition itself runs via the
    # tensorly tensorflow backend
    tf.enable_eager_execution()
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    sess = tf.Session(config=config)

    X = load_observation(obs_dir)

    if isinstance(X, np.ndarray) and X.shape[0] == 65:
        X = np.reshape(X, (65, 100, 116, 116))
        X_tf = tf.convert_to_tensor(X, dtype=tf.float32)
        tl.set_backend('tensorflow')
        core, factors = tucker(X_tf,
                               ranks=ranks,
                               init='random',
                               tol=10e-5,
                               verbose=False)
        # The flattened Tucker core is used as the feature vector
        out = core.numpy().flatten()
        print('Completed line {}'.format(i))
    else:
        print('Skipped line {}'.format(i))
        out = []
    sess.close()
    return out
Example #10
    def fit_tensors(self):
        start = time.time()
        print('Fitting Tucker decomposition...')
        core, factors = tucker(self.raw_data, ranks=[100, 50, 50])
        end = time.time()
        print('Tucker done in:', end - start)
        self.core = core
        self.factors = factors
Example #11
    def from_tensor(cls, tensor, tensorized_row_shape, tensorized_column_shape,
                    rank, n_matrices=(), **kwargs):
        n_matrices = _ensure_tuple(n_matrices)
        rank = tl.tucker_tensor.validate_tucker_rank(
            n_matrices + tensorized_row_shape + tensorized_column_shape, rank)

        with torch.no_grad():
            core, factors = tucker(tensor, rank, **kwargs)

        return cls(nn.Parameter(core), [nn.Parameter(f) for f in factors],
                   tensorized_row_shape, tensorized_column_shape, rank,
                   n_matrices=n_matrices)
Example #12
    def init_from_tensor(self, tensor, init='svd', **kwargs):
        with torch.no_grad():
            core, factors = tucker(tensor, self.rank, **kwargs)
        
        self.core = nn.Parameter(core)
        self.factors = FactorList([nn.Parameter(f) for f in factors])

        return self
Example #13
    def from_tensor(cls, tensor, rank='same', fixed_rank_modes=None, **kwargs):
        shape = tensor.shape
        rank = tl.tucker_tensor.validate_tucker_rank(
            shape, rank, fixed_modes=fixed_rank_modes)

        with torch.no_grad():
            core, factors = tucker(tensor, rank, **kwargs)

        return cls(nn.Parameter(core.contiguous()),
                   [nn.Parameter(f.contiguous()) for f in factors])
Example #14
def rank_search_tucker(tensor, rank_range):
    # Score each candidate rank by an AIC-style criterion:
    # 2 * ||tensor - reconstruction||^2 + 2 * (sum of the ranks)
    AIC = {}
    for rank in combinations_with_replacement(range(1, rank_range + 1), 3):
        decomp = tucker(tensor, rank)
        recon = tl.tucker_to_tensor(decomp)
        err = tensor - recon
        rank_AIC = 2 * tl.tenalg.inner(err, err) + 2 * sum(rank)
        AIC[rank] = rank_AIC

    return AIC
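A usage sketch: scan all rank triples up to 4 on a small random tensor and keep the one with the lowest AIC (assumes a recent tensorly where `tucker` returns a TuckerTensor that `tucker_to_tensor` accepts directly):

import numpy as np
import tensorly as tl
from tensorly.decomposition import tucker
from itertools import combinations_with_replacement

tensor = tl.tensor(np.random.randn(10, 10, 10))
scores = rank_search_tucker(tensor, rank_range=4)
best_rank = min(scores, key=scores.get)
print(best_rank, scores[best_rank])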
Example #15
    def init_from_tensor(self,
                         tensor,
                         unsqueezed_modes=None,
                         unsqueezed_init='average',
                         **kwargs):
        """Initialize the tensor factorization from a tensor

        Parameters
        ----------
        tensor : torch.Tensor
            full tensor to decompose
        unsqueezed_modes : int list
            list of modes with rank 1 that do not correspond to a mode of the full tensor.
            Essentially we are adding a new dimension for which the core has dim 1,
            and that is not initialized through decomposition.
            Instead, `tensor` is first decomposed into the other factors.
            The "unsqueezed" factors are then added and initialized, e.g. with 1/dim[i].
        unsqueezed_init : 'average' or float
            if unsqueezed_modes is given, this is how the added "unsqueezed" factors are initialized:
            if 'average', then unsqueezed_factor[i] will have value 1/tensor.shape[i]
        """
        if unsqueezed_modes is not None:
            unsqueezed_modes = sorted(unsqueezed_modes)
            for mode in unsqueezed_modes[::-1]:
                if self.rank[mode] != 1:
                    msg = 'It is only possible to initialize by averaging over a mode for which rank=1. '
                    msg += f'However, got unsqueezed_modes={unsqueezed_modes} but rank[{mode}]={self.rank[mode]} != 1.'
                    raise ValueError(msg)

            rank = tuple(r for (i, r) in enumerate(self.rank)
                         if i not in unsqueezed_modes)
        else:
            rank = self.rank

        with torch.no_grad():
            core, factors = tucker(tensor, rank, **kwargs)

            if unsqueezed_modes is not None:
                # Initialise with 1/shape[mode] or given value
                for mode in unsqueezed_modes:
                    size = self.shape[mode]
                    factor = torch.ones(size, 1)
                    if unsqueezed_init == 'average':
                        factor /= size
                    else:
                        factor *= unsqueezed_init
                    factors.insert(mode, factor)
                    core = core.unsqueeze(mode)

        self.core = nn.Parameter(core.contiguous())
        self.factors = FactorList([nn.Parameter(f) for f in factors])
        return self
Example #16
def main():
    start_time = time.time()
    args = parse_arguments()
    lines, verb2id, subject2id, object2id = get_dict_and_samples(
        args.input_path, args.min_count, args.first_n, args.step)

    if args.sparse:
        large_tensor = create_sparse_tensor(lines, verb2id, subject2id,
                                            object2id)
        print("Decomposition started")
        if args.algorithm == 'tucker':
            weights, factors = partial_tucker(large_tensor,
                                              modes=[0, 1, 2],
                                              rank=args.embedding_size,
                                              init='random')
        else:
            weights, factors = sparse_parafac(large_tensor,
                                              rank=args.embedding_size,
                                              init='random')
    else:
        tl.set_backend('pytorch')

        large_tensor = create_tensor(lines, verb2id, subject2id, object2id)
        print("Decomposition started")
        if args.algorithm == 'tucker':
            weights, factors = tucker(large_tensor,
                                      rank=args.embedding_size,
                                      init='random')
        else:
            weights, factors = parafac(large_tensor,
                                       rank=args.embedding_size,
                                       init='random')
        factors = [factor.cpu().numpy().astype(float) for factor in factors]

    assert [factor.shape[0] for factor in factors] == \
        [len(verb2id), len(subject2id), len(object2id)]

    output_path = os.path.join(
        args.output_path,
        f"{args.input_path[5:13]}-{args.algorithm}_e{args.embedding_size}_"
        f"min-count-{args.min_count}_cut-{args.first_n}_step-{args.step}")

    if not os.path.exists(output_path):
        os.mkdir(output_path)

    save_to_file(factors[0], verb2id, os.path.join(output_path, 'verbs.tsv'))
    save_to_file(factors[1], subject2id,
                 os.path.join(output_path, 'subjects.tsv'))
    save_to_file(factors[2], object2id, os.path.join(output_path,
                                                     'objects.tsv'))
    print(f"---- Took {(time.time() - start_time)} seconds ----")
Example #17
def tucker_decomposition_fc_layer(layer, rank):
    # weight is (out_dim, in_dim); l: (out_dim, r_out), r: (in_dim, r_in), core: (r_out, r_in)
    core, [l, r] = tucker(layer.weight.data, rank=rank)

    right_layer = torch.nn.Linear(r.shape[0], r.shape[1], bias=False)
    core_layer = torch.nn.Linear(core.shape[1], core.shape[0], bias=False)
    left_layer = torch.nn.Linear(l.shape[1], l.shape[0])

    left_layer.bias.data = layer.bias.data
    left_layer.weight.data = l
    core_layer.weight.data = core
    right_layer.weight.data = r.T

    new_layers = [right_layer, core_layer, left_layer]
    return nn.Sequential(*new_layers)
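For example, a classifier head can be swapped for its factorized version like this (a sketch; the rank pair is illustrative and tensorly's pytorch backend is assumed):

import torch
import torch.nn as nn
import tensorly as tl
from tensorly.decomposition import tucker

tl.set_backend('pytorch')
fc = nn.Linear(512, 10)
compressed = tucker_decomposition_fc_layer(fc, rank=[10, 64])  # [r_out, r_in]
x = torch.randn(4, 512)
print(compressed(x).shape)  # torch.Size([4, 10])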
Example #18
def tucker_decomposition(X):
    N, C, H, W = X.shape
    rank = 4
    tucker_rank = [C, rank, rank]
    Tucker_reconstructions = torch.zeros_like(X).cpu()
    Cores = torch.zeros([N, C, rank, rank])
    # Decompose each image independently and keep only its Tucker core
    for j, img in enumerate(X):
        core, tucker_factors = tucker(img, ranks=tucker_rank, init='random',
                                      tol=1e-4, random_state=np.random.RandomState())
        tucker_reconstruction = tl.tucker_to_tensor((core, tucker_factors))
        Tucker_reconstructions[j] = tucker_reconstruction
        Cores[j] = core

    return Cores
Example #19
    def ho_svd(self):
        X = square_tensor_gen(self.n,
                              self.rank,
                              dim=self.dim,
                              typ=self.gen_typ,
                              noise_level=self.noise_level)
        start_time = time.time()
        core, tucker_factors = tucker(
            X, ranks=[self.rank for _ in range(self.dim)], init='random')
        X_hat = tl.tucker_to_tensor(core, tucker_factors)
        running_time = time.time() - start_time
        rerr = eval_mse(X, X_hat)
        return (-1, running_time), rerr
Example #20
def test_decompose_test():
    tr = tl.tensor(np.arange(24).reshape(3, 4, 2))
    print(tr)
    unfolded = tl.unfold(tr, mode=0)
    tl.fold(unfolded, mode=0, shape=tr.shape)

    # Apply Tucker decomposition
    core, factors = tucker(tr, rank=[3, 4, 2])
    print("Core")
    print(core)
    print("Factors")
    print(factors)

    print(tl.tucker_to_tensor(core, factors))
Example #21
def test_tucker_decomposition(max_d_size, num_dims, d_interval, max_rank,
                              rank_interval, num_samples):
    """
    Purpose:
        benchmark the tucker decomposition of a randomly generated tucker decomposable tensor
        run tests using hypercube tensors for consistency
    :param max_d_size: maximum dimension size that each mode will reach
    :param num_dims: number of dimensions to test along
    :param d_interval: size of interval to jump by for each data point
    :param max_rank: maximum rank to test against
    :param rank_interval: size of interval for rank to jump by for each data point
    :param num_samples: number of items to sample over for each data point
    """
    rand_state = 5
    for r in range(1, max_rank, rank_interval):
        dims = []
        times = []
        for d in range(r, max_d_size, d_interval):
            time_sum = 0
            print(d)
            for n in range(0, num_samples):
                shp = tuple([d] * num_dims)
                t = rnd.tucker_tensor(shp,
                                      r,
                                      full=True,
                                      random_state=rand_state)
                start = time.time()
                tucker(t, tol=10e-6, random_state=rand_state)
                end = time.time()
                time_sum += end - start
            dims.append(d)
            times.append(time_sum / num_samples)
        plt.plot(dims, times, label='r = ' + str(r))
    plt.xlabel("Matrix dimension (square matrix)")
    plt.ylabel("Time elapsed (sec)")
    plt.legend(loc='best')
    plt.savefig('test_tucker_decomposition.eps', format='eps', dpi=1000)
Example #22
def tucker_core(imgarray, tucker_rank):
    """
    imgarray is 3-d numpy array of png image
    """
    imgtensor = tl.tensor(imgarray)
    # Tucker decomposition
    core, tucker_factors = tucker(imgtensor,
                                  ranks=tucker_rank,
                                  init='random',
                                  tol=10e-5,
                                  random_state=random_state)
    return core, tucker_factors
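A usage sketch for tucker_core, assuming the global `random_state` the snippet relies on is defined and an older tensorly accepting `ranks=`; the array stands in for a decoded PNG:

import numpy as np
import tensorly as tl
from tensorly.decomposition import tucker

random_state = 1234              # assumed global used by the snippet above
img = np.random.rand(64, 64, 3)  # stand-in for a 3-d PNG array
core, factors = tucker_core(img, tucker_rank=[16, 16, 3])
print(core.shape)                # (16, 16, 3)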
Example #23
def ordinary_tk(T, r1, r2, r3):
    # Run plain Tucker twice and compare the norms and sums of factors and core
    for i in range(2):
        core, factors = tucker(T, rank=[r1, r2, r3])
        print('--------------')
        print(i, 'compare test:    '
              'factors[0]:', np.linalg.norm(factors[0]),
              '    factors[1]', np.linalg.norm(factors[1]),
              '    factors[2]', np.linalg.norm(factors[2]),
              '    core', np.linalg.norm(core))
        print('element sum'
              '               ', factors[0].sum(),
              '               ', factors[1].sum(),
              '            ', factors[2].sum(),
              '       ', core.sum())

        print('calculate ||T - X|| :',
              calculate_error(T, core, factors[0], factors[1], factors[2], 0))
Example #24
def test_decomp():
    tr = vec_to_tensor(np.array([1, -1]))
    print(tr)
    factors = parafac(tr, rank=1)
    print('Parafac: {}'.format(factors))
    # tucker returns (core, factors); keep the factor matrices here
    _, tucker_facs = tucker(tr, ranks=[2, 2, 1])
    print('Tucker factors: {}'.format(tucker_facs))
    a = vec_to_tensor(factors[0])
    b = vec_to_tensor(factors[1])
    c = vec_to_tensor(factors[2])
    d1 = three_vecs_to_tensor(factors[0], factors[1], factors[2])
    print(d1)
    t1 = vec_to_tensor(tucker_facs[0])
    t2 = vec_to_tensor(tucker_facs[1])
    print(t1)
    print(t2)
Example #25
def tucker_decomp_tens(arr_3d, dim=5):
    features_n = len(arr_3d[0, 0, :])
    tucker_tensor = tucker(arr_3d,
                           rank=[1, features_n, dim],
                           verbose=-2,
                           random_state=1)  # modes: (x, y, z); x can change
    print("feature_x_dimension matrix")
    tuck_fd_2d = tucker_tensor[1][2]    # factor for the feature mode
    print(tuck_fd_2d.shape)
    print("steps_x_feature matrix")
    tuck_sf_2d = tucker_tensor[1][1]    # factor for the steps mode
    print(tuck_sf_2d.shape)
    tuck_fd_2d_2 = tucker_tensor[0][0]  # first core slice, to compare against
    tuck_i_1d = tucker_tensor[1][0]     # factor for the first (singleton) mode

    return tuck_fd_2d, tuck_sf_2d, tuck_fd_2d_2, tuck_i_1d
Example #26
    def forward(ctx, imgs):
        # Reconstruct each image from a low-rank Tucker model (rank is a global here)
        Tucker_reconstructions = torch.zeros_like(imgs).cpu()
        tucker_rank = [3, rank, rank]
        for j, img in enumerate(imgs):
            core, tucker_factors = tucker(img,
                                          ranks=tucker_rank,
                                          init='random',
                                          tol=1e-4,
                                          random_state=np.random.RandomState())
            tucker_reconstruction = tl.tucker_to_tensor((core, tucker_factors))
            Tucker_reconstructions[j] = tucker_reconstruction

        return Tucker_reconstructions
Example #27
def tucker_filter(imgarray, tucker_rank, compfilename):
    """
    imgarray is 3-d numpy array of png image
    """
    imgtensor = tl.tensor(imgarray)

    # Tucker decomposition
    core, tucker_factors = tucker(imgtensor,
                                  ranks=tucker_rank,
                                  init='random',
                                  tol=10e-5,
                                  random_state=random_state)
    tucker_reconstruction = tl.tucker_to_tensor(core, tucker_factors)
    Image.fromarray(to_image(tucker_reconstruction)).save('../input/' +
                                                          compfilename)
    return 0
Example #28
def representive_connMatrix_tucker(conn_seg):
    """Extract one representative connectivity matrix from a series of
    connectivity matrices for a time segment, based on Tucker decomposition.

    @param conn_seg: a series of connectivity matrices for a time segment
                     (n_chns * n_chns * n_times)

    return conn_repre: one representative connectivity matrix for this time segment
    """
    rank = [15, 15, 3]
    core, factors = tucker(conn_seg, ranks=rank)
    recon_conn_seg = tucker_to_tensor(core, factors)  # recon_conn_seg: 64*64*60
    # conn_repre: n_chns * n_chns, the connectivity summary for this segment
    conn_repre = np.mean(recon_conn_seg, axis=2)

    return conn_repre
Example #29
def ALTO_single(X, Coretensorsize, K, Pre_existingfactors,
                sigma):  #All the parameters are tensors
    ListoffactorsU = list(Pre_existingfactors)
    ListoffactorsV = Augmentlist(ListoffactorsU, K, sigma)
    Stilde = Tensor_matrixproduct(
        X, Operations_listmatrices(ListoffactorsV, "Transpose"))
    # The original line used random_state=2:
    # core, factors = tucker(Stilde, Coretensorsize, init='random', random_state=2)
    core, factors = tucker(Stilde,
                           Coretensorsize,
                           init='random',
                           random_state=1)
    Listoffactorsresult = []
    for i in range(len(factors)):
        Listoffactorsresult.append(
            np.dot(mxnet_backend.to_numpy(ListoffactorsV[i]),
                   mxnet_backend.to_numpy(factors[i])))
    Listoffactorsresult = Operations_listmatrices(Listoffactorsresult,
                                                  "Tensorize")
    return core, Listoffactorsresult
Example #30
def progressive_generation(imgfile, rankend, result_path):
    # max effective rankend is min(imgarray.shape[0],imgarray.shape[1])
    target = imgfile
    imgobj = Image.open(target)
    imgarray = np.array(imgobj)
    imgtensor = tl.tensor(imgarray)

    for rank in range(1, rankend + 1):
        fullfname = os.path.join(
            result_path,
            imgfile.split('/')[-1].split('.')[0] + '_' + str(rank) + '.png')
        core_rank = [rank, rank, 1]
        core, tucker_factors = tucker(imgtensor,
                                      ranks=core_rank,
                                      init='random',
                                      tol=10e-5,
                                      random_state=random_state)
        tucker_reconstruction = tl.tucker_to_tensor(core, tucker_factors)
        Image.fromarray(to_image(tucker_reconstruction)).save(fullfname)
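A closing usage sketch for progressive_generation, with a hypothetical `to_image` helper (the original one is not shown) that clips and casts for PIL, and the global `random_state` the snippet expects:

import os
import numpy as np
import tensorly as tl
from PIL import Image
from tensorly.decomposition import tucker

random_state = 1234  # assumed global

def to_image(t):
    # hypothetical stand-in for the original helper: clip to [0, 255] and cast
    return np.clip(tl.to_numpy(t), 0, 255).astype(np.uint8)

os.makedirs('out', exist_ok=True)
progressive_generation('sample.png', rankend=16, result_path='out')
# writes sample_1.png ... sample_16.png, increasingly faithful reconstructions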