Example #1
 def __init__(self, X, arm_sketches, ranks):
     tl.set_backend('numpy')
     self.arms = []
     self.core_tensor = None
     self.X = X
     self.arm_sketches = arm_sketches
     self.ranks = ranks
Example #2
 def fit(
     self,
     *views: np.ndarray,
 ):
     self.n_views = len(views)
     self.check_params()
     assert len(self.c) == len(views), 'c requires as many values as #views'
     train_views, covs_invsqrt = self.setup_tensor(*views)
     for i, el in enumerate(train_views):
         if i == 0:
             M = el
         else:
             for _ in range(len(M.shape) - 1):
                 el = np.expand_dims(el, 1)
             M = np.expand_dims(M, -1) @ el
     M = np.mean(M, 0)
     tl.set_backend('numpy')
     M_parafac = parafac(M, self.latent_dims, verbose=True)
     self.alphas = [
         cov_invsqrt @ fac
         for cov_invsqrt, fac in zip(covs_invsqrt, M_parafac.factors)
     ]
     self.score_list = [
         view @ self.alphas[i] for i, view in enumerate(train_views)
     ]
     self.weights_list = [
         weights / np.linalg.norm(score)
         for weights, score in zip(self.alphas, self.score_list)
     ]
     self.score_list = [
         view @ self.weights_list[i] for i, view in enumerate(train_views)
     ]
     self.train_correlations = self.predict_corr(*views)
     return self
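This fit builds a single tensor M from all the views and factorizes it with parafac. A minimal standalone sketch of that call (random data; shapes chosen only for illustration):

import numpy as np
import tensorly as tl
from tensorly.decomposition import parafac

tl.set_backend('numpy')
M = np.random.rand(5, 6, 7)             # stand-in for a 3-view covariance tensor
cp = parafac(tl.tensor(M), rank=2)      # CPTensor with .weights and .factors
print([f.shape for f in cp.factors])    # [(5, 2), (6, 2), (7, 2)]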
Example #3
 def fit(self, *views: np.ndarray):
     if self.c is None:
         self.c = [0] * len(views)
     assert (len(self.c) == len(views)), 'c requires as many values as #views'
     z = self.demean_data(*views)
     n = z[0].shape[0]
     covs = [(1 - self.c[i]) * view.T @ view / (n - 1) + self.c[i] * np.eye(view.shape[1])
             for i, view in enumerate(z)]
     covs_invsqrt = [np.linalg.inv(sqrtm(cov)) for cov in covs]
     z = [z_ @ cov_invsqrt for z_, cov_invsqrt in zip(z, covs_invsqrt)]
     for i, el in enumerate(z):
         if i == 0:
             M = el
         else:
             for _ in range(len(M.shape) - 1):
                 el = np.expand_dims(el, 1)
             M = np.expand_dims(M, -1) @ el
     M = np.mean(M, 0)
     # for i, cov_invsqrt in enumerate(covs_invsqrt):
     #    M = np.tensordot(M, cov_invsqrt, axes=[[0], [0]])
     tl.set_backend('numpy')
     M_parafac = parafac(M, self.latent_dims, verbose=True)
     self.weights_list = [cov_invsqrt @ fac for cov_invsqrt, fac in
                          zip(covs_invsqrt, M_parafac.factors)]
     self.score_list = [view @ self.weights_list[i] for i, view in enumerate(z)]
     self.weights_list = [weights / np.linalg.norm(score) for weights, score in
                          zip(self.weights_list, self.score_list)]
     self.score_list = [view @ self.weights_list[i] for i, view in enumerate(z)]
     self.train_correlations = self.predict_corr(*views)
     return self
Example #4
 def loss(self, *views):
     m = views[0].size(0)
     views = _demean(*views)
     covs = [
         (1 - self.r) * view.T @ view
         + self.r * torch.eye(view.size(1), device=view.device)
         for view in views
     ]
     whitened_z = [
         view @ mat_pow(cov, -0.5, self.eps) for view, cov in zip(views, covs)
     ]
     # The idea here is to form a tensor M with one dimension per view, where the
     # entry M[p_i, p_j, p_k, ...] holds the average over the n samples of the product
     # of the p_i-th feature of the i-th view, the p_j-th feature of the j-th view, etc.
     for i, el in enumerate(whitened_z):
         # To achieve this we start with the first view so M is nxp.
         if i == 0:
             M = el
         # For the remaining views we expand their dimensions to match M i.e. nx1x...x1xp
         else:
             for _ in range(len(M.size()) - 1):
                 el = torch.unsqueeze(el, 1)
             # Then we perform an outer product by expanding the dimensionality of M and
             # outer product with the expanded el
             M = torch.unsqueeze(M, -1) @ el
     M = torch.mean(M, 0)
     tl.set_backend("pytorch")
     M_parafac = parafac(
         M.detach(), self.latent_dims, verbose=False, normalize_factors=True
     )
     # Drop the CP weights so cp_to_tensor reconstructs from the factors alone
     M_parafac.weights = 1
     M_hat = cp_to_tensor(M_parafac)
     return torch.linalg.norm(M - M_hat)
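The commented loop above can be sanity-checked against an explicit einsum over the sample index: for three whitened views both should produce the same view-covariance tensor. A minimal sketch with arbitrary shapes:

import torch

n, p, q, r = 8, 3, 4, 5
a, b, c = torch.randn(n, p), torch.randn(n, q), torch.randn(n, r)
M = a
for el in (b, c):
    # Expand el to n x 1 x ... x 1 x p_el, then outer-product with M
    for _ in range(M.dim() - 1):
        el = el.unsqueeze(1)
    M = M.unsqueeze(-1) @ el
M = M.mean(0)                                    # shape (p, q, r)
M_ref = torch.einsum('np,nq,nr->pqr', a, b, c) / n
assert torch.allclose(M, M_ref, atol=1e-5)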
Example #5
def Tucker_for_linear(model, decompose_rate):
    tl.set_backend('pytorch')
    linear_flag = 1
    for i, key in enumerate(model.classifier._modules.keys()):

        if isinstance(model.classifier._modules[key],
                      torch.nn.modules.linear.Linear):
            if linear_flag != 0:
                print('first linear layer')
                linear_layer = model.classifier._modules[key]
                decomposed = tucker_for_first_linear_layer(
                    linear_layer, decompose_rate)
                model.classifier._modules[key] = decomposed
                print('linear layer decompose end\n')
                linear_flag = 0
            else:
                linear_layer = model.classifier._modules[key]
                decomposed = tucker_decomposition_linear_layer(
                    linear_layer, decompose_rate)
                model.classifier._modules[key] = decomposed
                print('linear layer decompose end\n')

    torch.save(model, 'decomposed_model.pkl')
    print('tucker decompose end, model saved!\n')
    print(model)
    return model
Example #6
def Tucker_decompose(model, ranks):

    tl.set_backend('pytorch')
    print('tucker decompose start\n')
    N = len(model.features._modules.keys())
    print('N is :', N)
    conv_flag = 1
    conv_count = 2
    for i, key in enumerate(model.features._modules.keys()):
        print('present layer is :', model.features._modules[key])
        if isinstance(model.features._modules[key],
                      torch.nn.modules.conv.Conv2d):
            conv_layer = model.features._modules[key]
            if conv_flag != 0:
                decomposed = tucker_for_first_conv_layer(conv_layer, ranks)
                model.features._modules[key] = decomposed
                print('conv layer decompose end')
                conv_flag = 0
            else:
                decomposed = tucker_decomposition_conv_layer(
                    conv_layer, ranks, conv_count)
                model.features._modules[key] = decomposed
                print('conv layer decompose end')
                conv_count += 2
    torch.save(model, 'decomposed_model.pkl')
    print('tucker decompose end, model saved!')
    # print(model)
    return model
Example #7
def test_set_backend_local_threadsafe():
    pytest.importorskip('torch')

    global_default = tl.get_backend()

    with ThreadPoolExecutor(max_workers=1) as executor:

        with tl.backend_context('numpy', local_threadsafe=True):
            assert tl.get_backend() == 'numpy'
            # Changes only happen locally in this thread
            assert executor.submit(tl.get_backend).result() == global_default

        # Set the global default backend
        try:
            tl.set_backend('pytorch', local_threadsafe=False)

            # Changed toplevel default in all threads
            assert executor.submit(tl.get_backend).result() == 'pytorch'

            with tl.backend_context('numpy', local_threadsafe=True):
                assert tl.get_backend() == 'numpy'

                def check():
                    assert tl.get_backend() == 'pytorch'
                    with tl.backend_context('numpy', local_threadsafe=True):
                        assert tl.get_backend() == 'numpy'
                    assert tl.get_backend() == 'pytorch'

                executor.submit(check).result()
        finally:
            tl.set_backend(global_default, local_threadsafe=False)
            executor.submit(tl.set_backend, global_default).result()

        assert tl.get_backend() == global_default
        assert executor.submit(tl.get_backend).result() == global_default
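For everyday use outside tests, the same context manager gives a scoped backend switch; a minimal sketch:

import tensorly as tl

# The backend changes only inside the block and is restored on exit
with tl.backend_context('numpy'):
    x = tl.tensor([[1., 2.], [3., 4.]])
print(tl.get_backend())  # whatever was active before the block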
Example #8
def get_decomposed_tensor(i, obs_dir, ranks):
    # Eager execution is required so core.numpy() works below; note that a
    # tf.Session cannot be created once eager mode is enabled, so none is used.
    tf.enable_eager_execution()

    X = load_observation(obs_dir)

    if isinstance(X, np.ndarray) and X.shape[0] == 65:
        X = np.reshape(X, (65, 100, 116, 116))
        X_tf = tf.convert_to_tensor(X, dtype=tf.float32)
        tl.set_backend('tensorflow')
        core, factors = tucker(X_tf,
                               ranks=ranks,
                               init='random',
                               tol=10e-5,
                               verbose=False)
        out = core.numpy().flatten()

        print('Completed line {}'.format(i))

    else:
        print('Skipped line {}'.format(i))

        out = []
    return out
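For comparison, a minimal self-contained Tucker call on the numpy backend (recent TensorLy versions spell the keyword rank rather than ranks):

import numpy as np
import tensorly as tl
from tensorly.decomposition import tucker

tl.set_backend('numpy')
X = tl.tensor(np.random.rand(6, 7, 8))
core, factors = tucker(X, rank=[2, 3, 4], init='random')
print(core.shape)                   # (2, 3, 4)
print([f.shape for f in factors])   # [(6, 2), (7, 3), (8, 4)]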
Example #9
def create_cp(dims,
              rank,
              sparsity=None,
              method='rand',
              weights=False,
              return_tensor=False,
              noise=None,
              sparse_noise=True):
    # TODO: investigate performance impact of setting backend here
    tl.set_backend('pytorch')

    if method == 'rand':
        randfunc = torch.rand
    elif method == 'randn':
        randfunc = torch.randn
    else:
        raise NotImplementedError(f'Unknown random method: {method}')

    n_dims = len(dims)
    factors = [randfunc((dim, rank)) for dim in dims]

    if sparsity is not None:
        if isinstance(sparsity, float):
            sparsity = [sparsity] * n_dims
        elif not isinstance(sparsity, (list, tuple)):
            raise ValueError(
                'Sparsity parameter should either be a float or tuple/list.')

        # Sparsify factors
        for dim in range(n_dims):
            n_el = dims[dim] * rank
            to_del = round(sparsity[dim] * n_el)
            if to_del == 0:
                continue
            idxs = torch.tensor(random.sample(range(n_el), to_del))
            factors[dim].view(-1)[idxs] = 0
            # torch.randperm(n_el, device=device)[:n_select]

    ten = None
    # Add noise
    if noise is not None:
        ten = tl.cp_to_tensor((torch.ones(rank), factors))
        if sparsity is None or not sparse_noise:
            nten = torch.randn(ten.size())
            ten += noise * (norm(ten) / norm(nten)) * nten
        else:
            flat = ten.view(-1)
            nzs = torch.nonzero(flat, as_tuple=True)[0]
            nvec = torch.randn(nzs.size(0))
            flat[nzs] += noise * (norm(ten) / norm(nvec)) * nvec

    if return_tensor:
        if ten is None:
            return tl.cp_to_tensor((torch.ones(rank), factors))
        return ten
    if weights:
        return torch.ones(rank), factors
    return factors
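Hypothetical usage of create_cp as defined above (shapes and values are illustrative only):

# Factors only, with half of each factor's entries zeroed out
factors = create_cp((10, 12, 14), rank=3, sparsity=0.5)

# Dense tensor with noise added only on the nonzero entries
ten = create_cp((10, 12, 14), rank=3, sparsity=0.5,
                noise=0.1, return_tensor=True)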
Example #10
 def __init__(self, shape, n_components):
     # Set backend as numpy
     tl.set_backend('numpy')
     self.shape = shape
     self.n_components = n_components
     self.Z = []
     self.X = None
     self.rand_init()
Example #11
 def __init__(self, X, ranks, ks=[], ss=[], random_seed=1, store_phis=True):
     tl.set_backend('numpy')
     self.X = X
     self.ranks = ranks
     self.ks = ks
     self.ss = ss
     self.random_seed = random_seed
     self.store_phis = store_phis
Example #12
 def __init__(self, sketchs, core_sketch, Tinfo_bucket, Rinfo_bucket):
     tl.set_backend('numpy')
     self.arms = []
     self.core_tensor = None
     self.sketchs = sketchs
     self.tensor_shape, self.k, self.rank, self.s = Tinfo_bucket.get_info()
     self.Rinfo_bucket = Rinfo_bucket
     self.core_sketch = core_sketch
Example #13
 def __init__(self, n, rank, k, s, dim, Rinfo_bucket, gen_typ, noise_level):
     tl.set_backend('numpy')
     self.n, self.rank, self.k, self.s, self.dim = n, rank, k, s, dim
     self.std, self.typ, self.random_seed, self.sparse_factor = \
         Rinfo_bucket.get_info()
     self.total_num = np.prod(np.repeat(n, dim))
     self.gen_typ = gen_typ
     self.noise_level = noise_level
     self.Rinfo_bucket = Rinfo_bucket
Example #14
def test(dataset, cwfa, action_encoder, obs_encoder):
    encoded_action = action_encoder(dataset.action.reshape(-1, dataset.action.shape[-1])).\
        reshape(dataset.action.shape[0], dataset.action.shape[1], -1).cpu().detach().numpy()
    encoded_obs = obs_encoder(dataset.obs.reshape(-1, dataset.obs.shape[-1])).\
        reshape(dataset.obs.shape[0], dataset.obs.shape[1], -1).cpu().detach().numpy()

    tl.set_backend('numpy')
    pred = []
    for i in range(len(encoded_obs)):
        pred.append(cwfa.predict(encoded_action[i], encoded_obs[i]))
    return np.asarray(pred).squeeze()
Example #15
 def __init__(self, Tinfo_bucket, Rinfo_bucket, gen_typ, noise_level):
     tl.set_backend('numpy')
     self.tensor_shape, self.k, self.rank, self.s = Tinfo_bucket.get_info()
     self.n = self.tensor_shape[0]
     self.dim = len(self.tensor_shape)
     self.std, self.typ, self.random_seed, self.sparse_factor = \
         Rinfo_bucket.get_info()
     self.total_num = np.prod(self.tensor_shape)
     self.gen_typ = gen_typ
     self.noise_level = noise_level
     self.Tinfo_bucket = Tinfo_bucket
     self.Rinfo_bucket = Rinfo_bucket
Example #16
def main():
    start_time = time.time()
    args = parse_arguments()
    lines, verb2id, subject2id, object2id = get_dict_and_samples(
        args.input_path, args.min_count, args.first_n, args.step)

    if args.sparse:
        large_tensor = create_sparse_tensor(lines, verb2id, subject2id,
                                            object2id)
        print("Decomposition started")
        if args.algorithm == 'tucker':
            weights, factors = partial_tucker(large_tensor,
                                              modes=[0, 1, 2],
                                              rank=args.embedding_size,
                                              init='random')
        else:
            weights, factors = sparse_parafac(large_tensor,
                                              rank=args.embedding_size,
                                              init='random')
    else:
        tl.set_backend('pytorch')

        large_tensor = create_tensor(lines, verb2id, subject2id, object2id)
        print("Decomposition started")
        if args.algorithm == 'tucker':
            weights, factors = tucker(large_tensor,
                                      rank=args.embedding_size,
                                      init='random')
        else:
            weights, factors = parafac(large_tensor,
                                       rank=args.embedding_size,
                                       init='random')
        factors = [factor.cpu().numpy().astype(float) for factor in factors]

    assert [factor.shape[0] for factor in factors] == \
        [len(verb2id), len(subject2id), len(object2id)]

    output_path = os.path.join(
        args.output_path,
        f"{args.input_path[5:13]}-{args.algorithm}_e{args.embedding_size}_"
        f"min-count-{args.min_count}_cut-{args.first_n}_step-{args.step}")

    if not os.path.exists(output_path):
        os.mkdir(output_path)

    save_to_file(factors[0], verb2id, os.path.join(output_path, 'verbs.tsv'))
    save_to_file(factors[1], subject2id,
                 os.path.join(output_path, 'subjects.tsv'))
    save_to_file(factors[2], object2id, os.path.join(output_path,
                                                     'objects.tsv'))
    print(f"---- Took {(time.time() - start_time)} seconds ----")
Example #17
    def __init__(self, X, ks, random_seed, ss = [], typ = 'g', \
        sparse_factor = 0.1, std = 1, store_phis = True):
        '''
        :param X: tensor being sketched
        :param ks: the reduced dimensions of the arm tensors, a 1-d array
        :param ss: at every index, the element of ss must be greater than the
         corresponding element of ks; when ss == [], the core sketch is skipped,
         i.e. core_sketch == X
        :param random_seed: random seed
        :param sparse_factor: only relevant when typ == 'sp', in which case it
         is the sparsity factor of the random matrices
        '''
        tl.set_backend('numpy')
        self.X = X
        self.N = len(X.shape)
        self.ss = ss
        self.ks = ks
        self.typ = typ
        self.sparse_factor = sparse_factor
        self.arm_sketches = []
        self.random_seed = random_seed
        self.core_sketch = X
        self.tensor_shape = X.shape
        self.phis = []
        self.std = std 

        # set the random seed for following procedure
        np.random.seed(random_seed) 
        Rinfo_bucket = RandomInfoBucket(std = self.std, typ=self.typ, 
            random_seed = self.random_seed, sparse_factor = self.sparse_factor)

        rm_generator = Sketch.sketch_arm_rm_generator(self.tensor_shape, \
            self.ks, Rinfo_bucket)

        mode_n = 0
        for rm in rm_generator:
            self.arm_sketches.append(np.dot(tl.unfold(self.X, mode=mode_n), rm))
            mode_n += 1
        np.random.seed(random_seed) 

        if self.ss != []:
            rm_generator = Sketch.sketch_core_rm_generator(self.tensor_shape, \
             self.ss, Rinfo_bucket)
            mode_n = 0

            for rm in rm_generator: 
                self.phis.append(rm) 
                self.core_sketch = tl.tenalg.mode_dot(self.core_sketch, rm,\
                 mode=mode_n)
                mode_n += 1
            if not store_phis:
                self.phis = []
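The TensorLy primitives doing the work in this constructor are tl.unfold and tl.tenalg.mode_dot; a minimal sketch of what each computes:

import numpy as np
import tensorly as tl

tl.set_backend('numpy')
X = np.arange(24).reshape(2, 3, 4)

# Mode-0 unfolding: rows index mode 0, columns enumerate the other modes
U0 = tl.unfold(X, mode=0)             # shape (2, 12)

# Mode-2 product: contract mode 2 (size 4) with a (2, 4) matrix
R = np.random.rand(2, 4)
Y = tl.tenalg.mode_dot(X, R, mode=2)  # shape (2, 3, 2)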
Example #18
def test_cpd_als_tensorly(benchmark):
    for datatype in BACKEND_TYPES:
        tl.set_backend(datatype)
        assert tl.get_backend() == datatype

        _, input_tensor_val = init_rand_cp(dim, size, rank)
        input_tensor = tl.tensor(input_tensor_val, dtype='float64')
        factors = benchmark(parafac,
                            input_tensor,
                            rank=rank,
                            init='random',
                            tol=0,
                            n_iter_max=1,
                            verbose=0)
Example #19
 def __init__(self,
              arm_sketches,
              core_sketch,
              Tinfo_bucket,
              Rinfo_bucket,
              phis=[]):
     tl.set_backend('numpy')
     self.arms = []
     self.core_tensor = None
     self.arm_sketches = arm_sketches
     # Note: get_info extracts some extraneous information
     self.tensor_shape, self.ks, self.ranks, self.ss = \
         Tinfo_bucket.get_info()
     self.Rinfo_bucket = Rinfo_bucket
     self.phis = phis
     self.core_sketch = core_sketch
Example #20
    def prox_nuclear_truncated_2(self, data, alpha, k=50):
        import tensorly as tl
        tl.set_backend('pytorch')
        U, S, V = tl.truncated_svd(data.cpu(), n_eigenvecs=k)
        U, S, V = torch.FloatTensor(U).cuda(), torch.FloatTensor(S).cuda(), torch.FloatTensor(V).cuda()
        self.nuclear_norm = S.sum()
        # print("nuclear norm: %.4f" % self.nuclear_norm)

        # Soft-threshold the singular values, then rebuild the diagonal as a
        # sparse k x k matrix and reassemble U @ diag(S) @ V
        S = torch.clamp(S - alpha, min=0)
        idx = torch.arange(S.shape[0], device=S.device)
        diag_S = torch.sparse_coo_tensor(torch.stack([idx, idx]), S,
                                         (S.shape[0], S.shape[0]))
        # diag_S = torch.diag(torch.clamp(S - alpha, min=0))
        V = torch.spmm(diag_S, V)
        V = torch.matmul(U, V)
        return V
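For reference, a minimal dense sketch of the same proximal step (singular-value soft-thresholding, the proximal operator of alpha times the nuclear norm), without the truncated SVD or the sparse diagonal:

import torch

def prox_nuclear_dense(data, alpha):
    # Shrink each singular value by alpha and clip at zero
    U, S, Vh = torch.linalg.svd(data, full_matrices=False)
    S = torch.clamp(S - alpha, min=0)
    return U @ torch.diag(S) @ Vh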
Example #21
 def __init__(self, shape, n_components, a=0.1, b=1):
     tl.set_backend('numpy')
     self.shape = shape
     self.n_components = n_components
     self.n_modes = len(shape)
     self.Z = []  # factors
     self.X = None  # tensor
     # Gamma shape(A) and mean(B) objects. Gamma scale is B/A
     self.A = self._create_gamma_prior(a)
     self.B = self._create_gamma_prior(b)
     # Inference variables:
     self.C = []  # variational shape parameters for factors
     self.D = []  # variational scale parameters for factors
     self.E = []  # arithmetic expectations of factors
     self.L = []  # geometric expectations of factors
     # Randomly initialize itself
     self.rand_init()
Example #22
def create_cp_sparse_gen(dims, rank, n_el, method='rand', return_sparse=False):
    tl.set_backend('pytorch')

    if method == 'rand':
        randfunc = torch.rand
    elif method == 'randn':
        randfunc = torch.randn
    else:
        raise NotImplementedError(f'Unknown random method: {method}')

    n_dims = len(dims)
    factors = [randfunc((dim, rank)) for dim in dims]
    lambdas = torch.ones(rank)

    # Create probability tensor
    P = normalize_cp_ten((lambdas, factors))
    lambdas /= torch.sum(lambdas)

    # Count samples per component; an n_el below 1 is read as a fraction of the
    # total number of entries (assumes dims is a plain tuple/list and the stdlib
    # math module is imported)
    n_edges = n_el
    if n_edges < 1:
        n_edges = round(n_edges * math.prod(dims))
    csums = probsample(n_edges, lambdas)

    subs = []
    # Calculate subscripts
    for c in range(rank):
        n_sample = int(csums[c])
        if n_sample == 0:
            continue

        sub_idxs = torch.zeros((int(n_sample), n_dims), dtype=torch.int64)
        for d in range(n_dims):
            sub_idxs[:, d] = probsample(n_sample,
                                        factors[d][:, c],
                                        return_counts=False)
        subs.append(sub_idxs)

    all_subs = torch.vstack(subs).T

    sp_ten = torch.sparse_coo_tensor(all_subs, torch.ones(n_edges))
    if return_sparse:
        return sp_ten

    return sp_ten.to_dense()
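probsample is user code not shown here; a plausible minimal implementation using torch.multinomial (an assumption, not the original author's code) would be:

import torch

def probsample(n, probs, return_counts=True):
    # Draw n indices according to (unnormalized, non-negative) probabilities;
    # optionally return per-index counts instead of the raw draws
    probs = probs / probs.sum()
    draws = torch.multinomial(probs, n, replacement=True)
    if return_counts:
        return torch.bincount(draws, minlength=probs.numel())
    return draws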
Example #23
def main():
    tl.set_backend("pytorch")
    args = parser.parse_args()

    if args.rank is not None and args.threshold is not None:
        raise Exception(
            "Conflicting arguments passed: args.rank and args.threshold.\n\tYou can only set either rank argument or threshold argument. You can't set both."
        )
    if args.resume is not None and args.reset_weights:
        raise Exception(
            "Conflicting arguments passed: args.resume and args.reset_weights.\n\tYou can't resume training from a certain checkpoint and reset weights as well."
        )

    if args.seed is not None:
        random.seed(args.seed)
        torch.manual_seed(args.seed)
        cudnn.deterministic = True
        warnings.warn('You have chosen to seed training. '
                      'This will turn on the CUDNN deterministic setting, '
                      'which can slow down your training considerably! '
                      'You may see unexpected behavior when restarting '
                      'from checkpoints.')

    if args.gpu is not None:
        warnings.warn('You have chosen a specific GPU. This will completely '
                      'disable data parallelism.')

    if args.dist_url == "env://" and args.world_size == -1:
        args.world_size = int(os.environ["WORLD_SIZE"])

    args.distributed = args.world_size > 1 or args.multiprocessing_distributed

    ngpus_per_node = torch.cuda.device_count()
    if args.multiprocessing_distributed:
        # Since we have ngpus_per_node processes per node, the total world_size
        # needs to be adjusted accordingly
        args.world_size = ngpus_per_node * args.world_size
        # Use torch.multiprocessing.spawn to launch distributed processes: the
        # main_worker process function
        mp.spawn(main_worker,
                 nprocs=ngpus_per_node,
                 args=(ngpus_per_node, args))
    else:
        # Simply call main_worker function
        main_worker(args.gpu, ngpus_per_node, args)
Example #24
def cp_decomposition_net(net):
    tl.set_backend('pytorch')
    N = len(net.features._modules.keys())
    for i, key in enumerate(net.features._modules.keys()):
        if i >= N - 2:
            break
        if isinstance(net.features._modules[key], torch.nn.modules.conv.Conv2d):
            conv_layer = net.features._modules[key]
            rank = max(conv_layer.weight.data.cpu().numpy().shape) // 3
            decomposed = cp_decomposition_conv_layer(conv_layer, rank)  # CP decomposition

            net.features._modules[key] = decomposed

    for param in net.parameters():
        param.requires_grad = True
    if torch.cuda.is_available():
        net.cuda()
    return net
Example #25
 def __init__(self, shape, n_components, alpha=0.1, beta=1):
     tl.set_backend('numpy')
     self.shape = shape
     self.n_components = n_components
     self.n_modes = len(shape)
     self.T = None  # factor 1
     self.V = None  # factor 2
     self.X = None  # tensor
     # Gamma shape(A) and mean(B) objects. Gamma scale is B/A
     self.At = self.Av = None
     self.Bt = self.Bv = None
     # Inference variables:
     self.Ct = self.Cv = None
     self.Dt = self.Dv = None
     self.Et = self.Ev = None
     self.Lt = self.Lv = None
     # Randomly initialize itself
     self.rand_init(alpha, beta)
Example #26
def run_parafac(path,
                dimensions=20,
                nonnegative=False,
                is_matrix=True,
                name='current',
                cuda=False,
                noise='orig',
                iterations=1,
                fixed='False'):
    # setting up the backend
    if cuda:
        tl.set_backend('pytorch')
    # checking if the arguments are valid
    assert is_ndarray_folder(path), "Invalid path."
    assert '_' not in name, "Name cannot contain the '_' symbol."
    # creating some useful paths to store factorization results
    mat_path = join(
        path, 'mat_' + str(dimensions) + '_' + str(iterations) + '_' + name)
    ten_path = join(
        path, 'ten_' + str(dimensions) + '_' + str(iterations) + '_' + name)
    # loading the meta data
    with open(join(path, 'meta_data.json'), 'r') as json_file:
        meta_data = json.load(json_file)
    # removing old factorization with same name (if exists)
    delete_factorization_by_name(name, path)
    # factorizing the data
    start = time.time()
    if is_matrix:
        matrix = sparse.load_npz(join(path, 'matrix.npz')).todense()
        matrices = prepare_ndarrays(matrix, iterations, fixed, noise)
        create_folder_if_absent(mat_path)
        factorize_matrices(matrices, iterations, dimensions, nonnegative,
                           mat_path, cuda)
    else:
        tensor = sparse.load_npz(join(path, 'tensor.npz')).todense()
        tensors = prepare_ndarrays(tensor, iterations, fixed, noise)
        create_folder_if_absent(ten_path)
        factorize_tensors(tensors, iterations, dimensions, nonnegative,
                          ten_path, meta_data, path, cuda)
    end = time.time()
    print('Factorization completed in %d seconds' % (end - start))
Example #27
def build(model, decomp='cp'):
    device = 'cuda:0' if torch.cuda.is_available() else 'cpu'
    print('==> Building model..')
    tl.set_backend('pytorch')
    full_net = model
    full_net = full_net.to(device)

    path = 'models/'
    if not os.path.exists(path):
        os.mkdir(path)
    torch.save(full_net, path + 'model')
    if decomp:
        decompose_conv(decomp)
        decompose_fc(decomp)
    if device == 'cuda:0':
        net = torch.load(path + "model").cuda()
    else:
        net = torch.load(path + "model")
    print(net)
    print('==> Done')
    return net
Example #28
def get_dataset(kde, **option):

    option_default = {
        'env': gym.make('Pendulum-v0'),
        'num_trajs': 1000,
        'max_episode_length': 10,
        'window_size': 5
    }
    option = {**option_default, **option}

    action_all_pr, observation_all_pr, x_pr, new_rewards_pr = generate_data(
        **option)
    pr = construct_PR_target(kde, x_pr, new_rewards_pr)
    tl.set_backend('pytorch')
    pr = normalize(pr)
    new_data = Dataset(data=[
        tl.tensor(action_all_pr).float(),
        tl.tensor(observation_all_pr).float(),
        tl.tensor(pr).float()
    ])

    return new_data
Example #29
 def __init__(self,
              mask=None,
              tol=10e-7,
              reg_E=1,
              reg_J=1,
              mu_init=10e-5,
              mu_max=10e9,
              learning_rate=1.1,
              n_iter_max=100,
              verbose=1,
              backend='numpy'):
     self.mask = mask
     self.tol = tol
     self.reg_E = reg_E
     self.reg_J = reg_J
     self.mu_init = mu_init
     self.mu_max = mu_max
     self.lr = learning_rate
     self.n_iter_max = n_iter_max
     self.verbose = verbose
     self.backend = backend
     tl.set_backend(backend)
Example #30
    def get_cp_factors(self):
        tl.set_backend('pytorch')
        bias = self.bias

        if isinstance(self.layer, nn.Sequential):
            # Tensorly case
            _, (f_cout, f_cin, f_h, f_w) = parafac(kruskal_to_tensor(
                (None, self.weight)),
                                                   self.rank,
                                                   n_iter_max=5000,
                                                   init='random',
                                                   tol=1e-8,
                                                   svd=None,
                                                   cvg_criterion='rec_error')

        else:
            # Tensorly case
            _, (f_cout, f_cin, f_h, f_w) = parafac(self.weight,
                                                   self.rank,
                                                   n_iter_max=5000,
                                                   init='random',
                                                   tol=1e-8,
                                                   svd=None,
                                                   cvg_criterion='rec_error')

        # Reshape factor matrices to 4D weight tensors:
        #   f_cin:  (cin, rank)  -> (rank, cin, 1, 1)
        #   f_h:    (h, rank)    -> (rank, 1, h, 1)
        #   f_w:    (w, rank)    -> (rank, 1, 1, w)
        #   f_cout: (cout, rank) -> (cout, rank, 1, 1)

        # PyTorch case
        f_cin = f_cin.t().unsqueeze_(2).unsqueeze_(3).contiguous()
        f_h = f_h.t().unsqueeze_(1).unsqueeze_(3).contiguous()
        f_w = f_w.t().unsqueeze_(1).unsqueeze_(2).contiguous()
        f_cout = f_cout.unsqueeze_(2).unsqueeze_(3).contiguous()

        return [f_cin, f_h, f_w, f_cout], [None, None, None, bias]
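A hypothetical way to assemble the returned factors into the usual four-stage CP-decomposed convolution (pointwise, depthwise-vertical, depthwise-horizontal, pointwise); strides, padding and dilation are omitted for brevity:

import torch.nn as nn

def build_cp_conv(f_cin, f_h, f_w, f_cout, bias):
    rank, cin = f_cin.shape[0], f_cin.shape[1]
    cout = f_cout.shape[0]
    pw1 = nn.Conv2d(cin, rank, kernel_size=1, bias=False)
    dw_h = nn.Conv2d(rank, rank, kernel_size=(f_h.shape[2], 1),
                     groups=rank, bias=False)
    dw_w = nn.Conv2d(rank, rank, kernel_size=(1, f_w.shape[3]),
                     groups=rank, bias=False)
    pw2 = nn.Conv2d(rank, cout, kernel_size=1, bias=bias is not None)
    # The reshaped CP factors become the layer weights directly
    pw1.weight.data, dw_h.weight.data = f_cin, f_h
    dw_w.weight.data, pw2.weight.data = f_w, f_cout
    if bias is not None:
        pw2.bias.data = bias
    return nn.Sequential(pw1, dw_h, dw_w, pw2)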
Example #31
    parser.add_argument("--decompose", dest="decompose", action="store_true")
    parser.add_argument("--fine_tune", dest="fine_tune", action="store_true")
    parser.add_argument("--train_path", type = str, default = "train")
    parser.add_argument("--test_path", type = str, default = "test")
    parser.add_argument("--cp", dest="cp", action="store_true", \
        help="Use cp decomposition. uses tucker by default")
    parser.set_defaults(train=False)
    parser.set_defaults(decompose=False)
    parser.set_defaults(fine_tune=False)
    parser.set_defaults(cp=False)    
    args = parser.parse_args()
    return args

if __name__ == '__main__':
    args = get_args()
    tl.set_backend('numpy')

    if args.train:
        model = ModifiedVGG16Model().cuda()
        optimizer = optim.SGD(model.classifier.parameters(), lr=0.0001, momentum=0.99)
        trainer = Trainer(args.train_path, args.test_path, model, optimizer)

        trainer.train(epoches=10)
        torch.save(model, "model")

    elif args.decompose:
        model = torch.load("model").cuda()
        model.eval()
        model.cpu()
        N = len(model.features._modules.keys())
        for i, key in enumerate(model.features._modules.keys()):