Example 1
def randomwalk_decomposition(goal_tn):
    loss_fun = cc.tensor_recovery_loss
    # Core i of the goal network carries its input dimension on its i-th mode
    input_dims = [t.shape[i] for i, t in enumerate(goal_tn)]
    base_tn = cc.random_tn(input_dims, rank=1)

    # Initialize the first tensor network close to zero
    for i in range(len(base_tn)):
        base_tn[i] /= 10
    base_tn = cc.make_trainable(base_tn)

    trained_tn, best_loss, loss_record = randomwalk_optim(
        base_tn,
        goal_tn,
        loss_fun,
        other_args={
            'cprint': True,
            'epochs': 10000,
            'max_iter': 20,
            'lr': 0.01,
            'optim': 'RMSprop',
            'search_epochs': 80,
            'cvg_threshold': 1e-10,
            'stop_on_plateau': {
                'mode': 'min',
                'patience': 100,
                'threshold': 1e-7
            },
            'dyn_print': True,
            'initial_epochs': 10
        })
    return loss_record
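A minimal driver for this example, assuming the snippet's cc helper module and randomwalk_optim are importable from the repository; the target network below is made up:

# Hypothetical usage sketch: build a random target network and recover it.
# Assumes `cc` and `randomwalk_decomposition` are in scope, as above.
input_dims = [4, 4, 4, 4]                          # made-up mode sizes
goal_tn = cc.random_tn(input_dims, rank=3)         # made-up target rank
loss_record = randomwalk_decomposition(goal_tn)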
Example 2
def randomwalk_regression(train_data, val_data=None):
    loss_fun = cc.regression_loss
    input_dims = [t.shape[1] for t in train_data[0]]
    base_tn = cc.random_tn(input_dims, rank=1)

    # Initialize the first tensor network close to zero
    for i in range(len(base_tn)):
        base_tn[i] /= 10
    base_tn = cc.make_trainable(base_tn)

    trained_tn, best_loss, loss_record = randomwalk_optim(
        base_tn,
        train_data,
        loss_fun,
        val_data=val_data,
        other_args={
            'cprint': True,
            'epochs': None,  # rely on early stopping
            'max_iter': 20,
            'lr': 0.01,
            'optim': 'RMSprop',
            'search_epochs': 80,
            'cvg_threshold': 1e-10,
            'bsize': 100,
            'is_reg': True,
            'stop_on_plateau': {
                'mode': 'min',
                'patience': 50,
                'threshold': 1e-7
            },
            'dyn_print': True,
            'initial_epochs': 10
        })
    return loss_record
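A hedged driver for the regression example; cc.generate_regression_data is taken from the main() script further below, while the target network and sample counts are assumptions:

# Hypothetical usage sketch; data is synthesized from a random target
# network via cc.generate_regression_data (also used in main() below).
goal_tn = cc.random_tn([7, 7, 7, 7, 7], rank=2)          # assumed target
train_data = cc.generate_regression_data(goal_tn, 1000, noise=1e-6)
val_data = cc.generate_regression_data(goal_tn, 200, noise=1e-6)
loss_record = randomwalk_regression(train_data, val_data=val_data)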
Example 3
def _limit_random_tn(input_dims, rank, max_params):
    """Sample a random TN with fewer than max_params parameters (rejection sampling)"""
    new_tn = cc.random_tn(input_dims, rank)
    if cc.num_params(new_tn) < max_params:
        return new_tn
    else:
        # Bug fix: the recursive call must be returned, otherwise the
        # function returns None whenever the first draw is too large
        return _limit_random_tn(input_dims, rank, max_params)
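Rejection sampling retries until a draw fits the parameter budget, so a tight max_params can drive the recursion deep enough to raise RecursionError. An equivalent iterative sketch, using only the cc calls already shown:

def _limit_random_tn_iter(input_dims, rank, max_params):
    """Iterative twin of _limit_random_tn: same rejection sampling,
    but a loop instead of recursion, so no RecursionError."""
    while True:
        new_tn = cc.random_tn(input_dims, rank)
        if cc.num_params(new_tn) < max_params:
            return new_tn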
def generate_tensor_ring(input_dims, tr_ranks):
    """
    Generate random tensor ring
    
    Args:
        input_dims: List of input dimensions for each core in the network
        tr_ranks:   List of TR ranks
        
    Returns:
        tr_cores:   List of randomly initialized tensor ring cores
    """
    assert len(input_dims) == len(tr_ranks)
    n_cores = len(input_dims)
    ranks = []
    for i in range(n_cores - 1):
        rank_i = np.ones((n_cores - 1 - i), dtype=np.int32)
        rank_i[0] = tr_ranks[i]
        ranks.append(rank_i.tolist())
    # Close the ring: the last TR rank connects the first and last cores
    ranks[0][-1] = tr_ranks[-1]
    tr_cores = cc.random_tn(input_dims=input_dims, rank=ranks)
    
    return tr_cores 
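To make the rank bookkeeping concrete, here is what the loop produces for four cores; the dimensions are made up and cc.num_params is the helper used elsewhere in these examples:

# For tr_ranks = [2, 3, 4, 5] the loop first builds the chain ranks
#   ranks = [[2, 1, 1], [3, 1], [4]]
# and ranks[0][-1] = 5 then closes the ring with an edge between
# cores 0 and 3:
#   ranks = [[2, 1, 5], [3, 1], [4]]
tr_cores = generate_tensor_ring([4, 4, 4, 4], [2, 3, 4, 5])
print('ring parameters:', cc.num_params(tr_cores))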
Example 5
def greedy_completion(dataset, input_dims, initial_network=None, filename=None):

    loss_fun = cc.completion_loss

    base_tn = cc.random_tn(input_dims, rank=1)

    # Optionally scale the initial network toward zero
    # (the divisor of 1 below makes this a no-op as written)
    for i in range(len(base_tn)):
        base_tn[i] /= 1
    base_tn = cc.make_trainable(base_tn)

    if initial_network:
        base_tn = initial_network

    # create list of all edges allowed in a TR decomposition    
    #ndims = len(base_tn)
    #tr_edges = [(i,j) for i in range(ndims) for j in range(i+1,ndims) if i+1==j] + [(0,ndims-1)]


    from torch.optim.lr_scheduler import ReduceLROnPlateau
    lr_scheduler = lambda optimizer: ReduceLROnPlateau(
        optimizer, mode='min', factor=0.5, patience=50, verbose=True, threshold=1e-7)
    trained_tn, best_loss, loss_record = greedy_optim(
        base_tn,
        dataset,
        loss_fun,
        find_best_edge=greedy_find_best_edge,
        other_args={
            'cprint': True,
            'epochs': 20000,
            'max_iter': 100,
            'lr': 0.01,
            'optim': 'RMSprop',
            'search_epochs': 20,
            'cvg_threshold': 1e-10,
            # 'stop_on_plateau': {'mode': 'min', 'patience': 50, 'threshold': 1e-7},
            'dyn_print': True,
            'initial_epochs': 10,
            'bsize': -1,
            'rank_increment': 2,
            # 'allowed_edges': tr_edges,
            'lr_scheduler': lr_scheduler,
            'filename': filename
        })

    return loss_record
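A hypothetical call to greedy_completion. The completion dataset's exact format (whatever cc.completion_loss consumes) is not shown in this snippet, so it is left abstract; the warm start reuses generate_tensor_ring from Example 3:

# Hypothetical invocation; `dataset` stands for observed-entry data in
# whatever format cc.completion_loss expects (not documented here).
input_dims = [8, 8, 8, 8]                              # assumed shape
tr_init = cc.make_trainable(generate_tensor_ring(input_dims, [2, 2, 2, 2]))
loss_record = greedy_completion(dataset, input_dims,
                                initial_network=tr_init,
                                filename='completion_run')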
def generate_tensor_tri(input_dims, tri_ranks):
    """
    Generate random tensor network with triangle structure
    
    Args:
        input_dims: List of input dimensions for each core in the network
        tri_ranks:  List of ranks
        
    Returns:
        tri_cores:  List of randomly initialized tensor cores for triangle TN
    """
    assert len(input_dims) == len(tri_ranks)
    n_cores = len(input_dims)
    ranks = []
    for i in range(n_cores - 1):
        rank_i = np.ones((n_cores - 1 - i), dtype=np.int32)
        rank_i[0] = tri_ranks[i]
        ranks.append(rank_i.tolist())
    # Rewire the chain into a triangle: detach the last core from its
    # chain neighbor, then connect it to cores 1 and 2 instead
    ranks[-1][-1] = 1
    ranks[1][-1] = tri_ranks[-2]
    ranks[2][-1] = tri_ranks[-1]
    tri_cores = cc.random_tn(input_dims=input_dims, rank=ranks)

    return tri_cores
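As with the ring, a worked instance clarifies the rewiring; the dimensions below are made up:

# For five cores with tri_ranks = [2, 3, 4, 5, 6], the loop gives the
# chain ranks [[2, 1, 1, 1], [3, 1, 1], [4, 1], [5]]; the three
# assignments then yield
#   ranks = [[2, 1, 1, 1], [3, 1, 5], [4, 6], [1]]
# i.e. the chain 0-1-2-3 plus edges (1,4) and (2,4), so cores 1, 2
# and 4 form the triangle.
tri_cores = generate_tensor_tri([4, 4, 4, 4, 4], [2, 3, 4, 5, 6])
print('triangle parameters:', cc.num_params(tri_cores))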
def main(args):
    num_train = args.ntrain
    num_val = args.nval
    input_dims = [7, 7, 7, 7, 7]
    goal_tn = torch.load(args.path)
    base_tn = cc.random_tn(input_dims, rank=1)
    base_tn = cc.make_trainable(base_tn)
    loss_fun = cc.regression_loss
    train_data = cc.generate_regression_data(goal_tn, num_train, noise=1e-6)
    val_data = cc.generate_regression_data(goal_tn, num_val, noise=1e-6)

    best_network, first_loss, best_loss, loss_record, loss_hist, param_count, d_loss_hist = rw.randomwalk_optim(
        base_tn,
        train_data,
        loss_fun,
        val_data=val_data,
        other_args={
            'dhist': True,
            'optim': 'RMSprop',
            'max_iter': args.maxiter,
            'epochs': None,  # early stopping
            'lr': 0.001
        })

    plt.figure(figsize=(4, 3))
    plt.plot(loss_hist[0])
    plt.xlabel('Epoch')
    plt.ylabel('Training loss')
    plt.savefig('./figures/' + args.path + '_randomwalk' + '_trainloss' +
                '_.pdf',
                bbox_inches='tight')

    plt.figure(figsize=(4, 3))
    plt.plot(param_count, d_loss_hist[0])
    plt.xlabel('Number of parameters')
    plt.ylabel('Training loss')
    plt.savefig('./figures/' + args.path + '_randomwalk' +
                '_trainloss_numparam' + '_.pdf',
                bbox_inches='tight')

    plt.figure(figsize=(4, 3))
    plt.plot(param_count, loss_record)
    plt.xlabel('Number of parameters')
    plt.ylabel('Validation loss')
    plt.savefig('./figures/' + args.path + '_randomwalk' +
                '_valloss_numparam' + '_.pdf',
                bbox_inches='tight')

    ### TODO: Add greedy

    ### random search
    best_network, first_loss, best_loss, loss_record, param_count, d_loss_hist = rs.randomsearch_optim(
        base_tn,
        train_data,
        loss_fun,
        val_data=val_data,
        other_args={
            'dhist': True,
            'optim': 'RMSprop',
            'max_iter': args.maxiter,
            'epochs': None,  # early stopping
            'lr': 0.001
        })

    plt.figure(figsize=(4, 3))
    plt.plot(param_count, d_loss_hist[0])
    plt.xlabel('Number of parameters')
    plt.ylabel('Training loss')
    plt.savefig('./figures/' + args.path + '_randomsearch' +
                '_trainloss_numparam' + '_.pdf',
                bbox_inches='tight')

    plt.figure(figsize=(4, 3))
    plt.plot(param_count, loss_record)
    plt.xlabel('Number of parameters')
    plt.ylabel('Validation loss')
    plt.savefig('./figures/' + args.path + '_randomsearch' +
                '_valloss_numparam' + '_.pdf',
                bbox_inches='tight')

    torch.manual_seed(0)
    # Tensor decomposition experiment: the target tensor is a chain (TT)
    d0, d1, d2, d3, d4, d5 = 4, 4, 4, 4, 4, 4
    r12, r23, r34, r45, r56 = 2, 3, 6, 5, 4
    input_dims = [d0, d1, d2, d3, d4, d5]
    rank_list = [[r12, 1, 1, 1, 1],
                 [r23, 1, 1, 1],
                 [r34, 1, 1],
                 [r45, 1],
                 [r56]]

    # Parameters to control the experimental behavior
    exp_params = {'print': False, 'epochs': 200}

    loss_fun = cc.tensor_recovery_loss
    base_tn = cc.random_tn(input_dims, rank=1)
    goal_tn = cc.random_tn(input_dims, rank=rank_list)
    base_tn = cc.make_trainable(base_tn)
    _, _, better_loss = discrete_optim_template(base_tn,
                                                goal_tn,
                                                loss_fun,
                                                other_args=exp_params)
    print('better loss = ', better_loss)
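main() reads four attributes from args (ntrain, nval, path, maxiter). A hypothetical argparse entry point matching those names; the defaults are assumptions:

if __name__ == '__main__':
    import argparse
    # Hypothetical CLI wrapper; flag names mirror the attributes that
    # main() reads. Default values are assumptions, not from the source.
    parser = argparse.ArgumentParser()
    parser.add_argument('--ntrain', type=int, default=1000)
    parser.add_argument('--nval', type=int, default=200)
    parser.add_argument('--path', type=str, required=True)
    parser.add_argument('--maxiter', type=int, default=20)
    main(parser.parse_args())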
Example 9

    # New target
    d0, d1, d2, d3, d4 = 7, 7, 7, 7, 7
    r12, r23, r34, r45 = 2, 3, 6, 5

    input_dims = [d0, d1, d2, d3, d4]
    # rank_list = [[r12, 1, 1, 1], 
    #                  [r23,1, 1], 
    #                      [r34, 1],
    #                           [r45]]
    
    loss_fun = cc.tensor_recovery_loss
    base_tn = cc.random_tn(input_dims, rank=1)
    
    # Initialize the first tensor network close to zero
    for i in range(len(base_tn)):
        base_tn[i] /= 10
    base_tn = cc.make_trainable(base_tn)
    goal_tn = torch.load('tt_cores_5.pt')
    print('target tensor network number of params: ', cc.num_params(goal_tn))
    print('number of params for full target tensor:', np.prod(input_dims))
    print('target tensor norm:', cc.l2_norm(goal_tn))


    from torch.optim.lr_scheduler import ReduceLROnPlateau

    lr_scheduler = lambda optimizer: ReduceLROnPlateau(
        optimizer, mode='min', factor=1e-10, patience=200, verbose=True, threshold=1e-7)
    trained_tn, init_loss, better_loss = greedy_optim(base_tn,