Code example #1
# Imports shared by all six examples; project-local helpers such as
# NN, __load_data, train, validation, get_optimizer, Agent and State
# are defined elsewhere in the repository and are not shown here.
import json
import logging
import os
import random
import sys
import time
from copy import deepcopy
from random import shuffle

import numpy as np
import pandas as pd
import torch
import torch.nn as nn
from torch.utils.tensorboard import SummaryWriter


def main():
    with open('params.json') as json_file:
        config = json.load(json_file)

    ###############
    # Test Loader #
    ###############
    test_loader = __load_data(config)

    #######################
    # Loading the trained #
    ######## model ########
    model = NN(config['architecture'], is_maskable=True)
    for (_, _, files) in os.walk(config['model_path']):
        file = [s for s in files if s.endswith('.pt')]  # only matches from the last directory walked survive

    checkpoint = torch.load(config['model_path'] + '/' + file[0],
                            map_location=config['device'])
    model.load_state_dict(checkpoint)
    model = model.to(config['device'])
    model.eval()

    ##############
    # Evaluating #
    ##############
    classes = range(0, 10)
    class_correct = list(0. for i in range(10))
    class_total = list(0. for i in range(10))
    for images, labels in test_loader:
        images, labels = images.to(config['device']), labels.to(
            config['device'])

        output = model(images)
        # convert output probabilities to predicted class
        _, pred = torch.max(output, 1)
        # compare predictions to true label
        correct = np.squeeze(pred.eq(labels.data.view_as(pred)))
        # calculate test accuracy for each object class
        for i in range(len(labels)):
            label = labels.data[i]
            class_correct[label] += correct[i].item()
            class_total[label] += 1

    for i in range(10):
        if class_total[i] > 0:
            logging.info('Test Accuracy of {}: {:.2f}% ({:.0f}/{:.0f})'.format(
                str(i), 100 * class_correct[i] / class_total[i],
                class_correct[i], class_total[i]))
        else:
            logging.info('Test Accuracy of %5s: N/A (no training examples)' %
                         (classes[i]))

    logging.info('Test Accuracy (Overall): {:.2f}% ({:.0f}/{:.0f})'.format(
        100. * np.sum(class_correct) / np.sum(class_total),
        np.sum(class_correct), np.sum(class_total)))
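
All six scripts start by parsing a params.json from the working directory. The schema differs between them (example #1 reads flat keys, while the later examples nest settings under 'model', 'train', 'prune', 'mdp' and so on). A minimal file consistent with the keys example #1 reads might look like the sketch below; every value is an illustrative assumption, and the format NN() expects for 'architecture' is not visible in these listings.

{
    "architecture": [784, 300, 100, 10],
    "model_path": "results/run-0",
    "device": "cpu"
}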
Code example #2
def main():
    with open('params.json') as json_file:
        config = json.load(json_file)

    ################
    # Train Loader #
    ################
    train_loader = __load_data(config)

    #######################
    # Loading the trained #
    ######## model ########
    model = NN(config['architecture'], is_maskable=True)

    dt = []
    for (root, dirs, files) in os.walk(config['model_path']):
        if 'SEARCH' in root or 'Lottery' in root:
            ckpt = [f for f in files if f.endswith('.pt')]

            checkpoint = torch.load(root + '/' + ckpt[0],
                                    map_location=config['device'])
            model.load_state_dict(checkpoint)
            model = model.to(config['device'])
            model.eval()

            logging.info(
                'Computing the train accuracy from:\n\t{}'.format(root + '/' +
                                                                  ckpt[0]))
            names = root.split('/')
            name = 'LOTTERY_TICKETS'
            if 'SEARCH' in root:
                name = names[1] + '__' + names[4]
            dt.append([name])

            ##############
            # Evaluating #
            ##############
            __make_evaluation(model, train_loader, config, dt)

    dt = pd.DataFrame(dt)
    name = config['model_path']
    dt.to_csv(f'{name}_train_acc.tsv', index=False, sep='\t')
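
__make_evaluation(model, train_loader, config, dt) is not shown in any of these listings. Judging from the call site, where the row [name] has just been appended to dt and dt is later written out as a per-run accuracy table, a minimal sketch could be the following; the body is a hypothetical reconstruction, not the project's actual helper.

def __make_evaluation(model, loader, config, dt):
    # Hypothetical sketch: append the model's overall accuracy on
    # `loader` to the most recent row of `dt` (the one holding the
    # run name appended just before the call).
    correct, total = 0, 0
    with torch.no_grad():
        for images, labels in loader:
            images = images.to(config['device'])
            labels = labels.to(config['device'])
            _, pred = torch.max(model(images), 1)
            correct += pred.eq(labels).sum().item()
            total += labels.size(0)
    dt[-1].append(correct / total)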
Code example #3
def main():
    logging.basicConfig(level=logging.DEBUG,
                        format='[%(asctime)s %(filename)s] %(message)s')
    with open('params.json') as json_file:
        config = json.load(json_file)

    if len(sys.argv) == 2:
        config['policy_dir'] = sys.argv[1]

    # loading the dataset
    train_loader, valid_loader = __load_data(config)

    # Creating the model
    model = NN(config['model']['architecture'], is_maskable=True)
    model = model.to(config['device'])

    # Getting the criterion, optimizer
    criterion = nn.CrossEntropyLoss()
    optimizer = get_optimizer(config, model)

    ###################
    # Train and Prune #
    ###################
    done = False
    epoch = 0
    while not done:
        logging.info('')
        logging.info('Global step {}'.format(epoch))
        logging.info('')
        train(model, train_loader, valid_loader, criterion, optimizer,
              config['train']['epochs'], config['train']['print_every'],
              config['device'])

        if epoch % config['prune']['each'] == 0 and epoch != 0:
            logging.info('Pruning the model')
            model = __prune(model, config)
            
            if config['prune']['reward_weights']:
                logging.info('Rewarding the weights')
                model.reward()
        
            done = __verify_stop(model, config)
        
        epoch += 1

    ##############
    # Last Train #
    ##############
    train(model, train_loader, valid_loader, criterion, optimizer, 5,
          config['train']['print_every'], config['device'])
    
    checkpoint = config['policy_dir'] + '/Train-{}-epochs__Prune-each-{}__Prune-rate-{}.pt'.format(
            config['train']['epochs'], config['prune']['each'], config['prune']['rate']
    )
    torch.save(model.state_dict(), checkpoint)
    logging.info("Model checkpoint saved to %s" % checkpoint)

    ######################
    # Validate the model #
    ######################
    test_loss = 0.0
    classes = range(0, 10)
    class_correct = list(0. for i in range(10))
    class_total = list(0. for i in range(10))
    model.eval() # prep model for evaluation

    for data, target in valid_loader:
        data, target = data.to(config['device']), target.to(config['device'])

        # forward pass: compute predicted outputs by passing inputs to the model
        output = model(data)
        # calculate the loss
        loss = criterion(output, target)
        # update test loss 
        test_loss += loss.item()*data.size(0)
        # convert output probabilities to predicted class
        _, pred = torch.max(output, 1)
        # compare predictions to true label
        correct = np.squeeze(pred.eq(target.data.view_as(pred)))
        # calculate test accuracy for each object class
        for i in range(len(target)):
            label = target.data[i]
            class_correct[label] += correct[i].item()
            class_total[label] += 1

    # calculate and print avg test loss
    test_loss = test_loss/len(valid_loader.sampler)
    logging.info('Valid Loss: {:.6f}\n'.format(test_loss))

    results = []
    for i in range(10):
        if class_total[i] > 0:
            logging.info('Test Accuracy of %5s: %2d%% (%2d/%2d)' % (
                str(i), 100 * class_correct[i] / class_total[i],
                class_correct[i], class_total[i]))
            results.append([str(i), 100 * class_correct[i] / class_total[i],
                class_correct[i], class_total[i]])
        else:
            logging.info('Test Accuracy of %5s: N/A (no training examples)' % (classes[i]))

    logging.info('Test Accuracy (Overall): %2d%% (%2d/%2d)' % (
        100. * np.sum(class_correct) / np.sum(class_total),
        np.sum(class_correct), np.sum(class_total)))

    results = pd.DataFrame(results)
    results.to_csv(
        config['policy_dir'] + '/Accuracy__Lottery-train-{}-epochs__Prune-each-{}__Prune-rate-{}.tsv'.format(
            config['train']['epochs'], config['prune']['each'], config['prune']['rate']
        ), 
        index=False,
        header=['Class', 'Accuracy', 'Right_Instances', 'Total_Instances'],
        sep='\t'
    )
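
__prune and __verify_stop are likewise project-local. Example #6 shows that model.masks is an iterable of modules whose weight tensor holds the 0/1 mask, so a plausible sketch of the stopping test is given below; the config key 'stop_at' is invented for illustration, since the real criterion is not visible here.

def __verify_stop(model, config):
    # Hypothetical sketch: stop pruning once the fraction of surviving
    # weights falls to the configured threshold.
    total = sum(int(np.prod(m.weight.shape)) for m in model.masks)
    remaining = sum(m.weight.sum().item() for m in model.masks)
    return remaining / total <= config['prune']['stop_at']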
Code example #4
def main():
    logging.basicConfig(level=logging.DEBUG,
                        format="[%(asctime)s %(filename)s] %(message)s")
    with open('params.json') as json_file:
        config = json.load(json_file)

    args = __create_args()

    config = __adjust_config(args, config)

    # loading the dataset
    train_loader, valid_loader = __load_data(config)

    # Creating the model
    model = NN(config['model']['architecture'], is_maskable=True)
    model = model.to(config['device'])
    initial_mask = model.masks

    # Getting the criterion, optimizer
    criterion = nn.CrossEntropyLoss()
    optimizer = get_optimizer(config, model)

    #########################
    # Agent and Environment #
    #########################
    ACTIONS = create_environment(model.masks, config['environment_protocol'])
    random.seed(42)
    shuffle(ACTIONS)
    N_STATES = len(ACTIONS)
    N_EPISODES = config['mdp']['N_EPISODES']
    MAX_STEPS_PER_EPISODE = config['mdp']['MAX_STEPS_PER_EPISODES']
    MIN_ALPHA = config['mdp']['MIN_ALPHA']
    GAMMA = config['mdp']['GAMMA']
    alphas = np.linspace(1.0, MIN_ALPHA, N_EPISODES)

    q_table = dict()
    start_state = State(model.masks, ACTIONS)

    ##########################
    # Create sub_working_dir #
    ##########################
    sub_working_dir = '{}/results/{}/{}/{}/{}'.format(
        config['working_dir'], config['model']['name'],
        '_' + config['mdp']['Q_COMPUTATION'],
        '{}_{}_{}/{}_{}'.format(time.strftime("%d", time.localtime()),
                                time.strftime("%m", time.localtime()),
                                time.strftime("%Y", time.localtime()),
                                time.strftime("%H", time.localtime()),
                                time.strftime("%M", time.localtime())),
        'ALPHA_SEARCH__MIN_ALPHA-{}__GAMMA-{}__PRUNE_TYPE-{}__PRUNE_PERCENT-{}__EPSILON-{}__REWARD_TYPE-{}'
        .format(MIN_ALPHA,
                GAMMA if config['mdp']['Q_COMPUTATION'] != 'QL_M' else 'None',
                config['environment_protocol'],
                config['agent']['prune_percentage'],
                config['agent']['epsilon'], config['agent']['reward_type']))

    if not os.path.exists(sub_working_dir):
        os.makedirs(sub_working_dir)
    config["sub_working_dir"] = sub_working_dir
    logging.info("sub working dir: %s" % sub_working_dir)

    ###############
    # Begin Train #
    ###############
    train(model, train_loader, valid_loader, criterion, optimizer,
          config['train']['epochs'], config['train']['print_every'],
          config['device'])
    loss, accuracy = validation(model, valid_loader, criterion)
    logging.info(
        'Validation Loss performed: {}\tValidation Accuracy performed: {}'.
        format(loss, accuracy))

    if config['agent']['reward_type'] == 'LOSS':
        start_state.last_reward = -loss
    elif config['agent']['reward_type'] in ('ACCURACY', 'ACC_COMPRESSION', 'MY_RCRA'):
        # these three reward types share the same initial reward
        start_state.last_reward = -(1. - accuracy)

    #########
    # Prune #
    #########
    for e in range(N_EPISODES):

        state = deepcopy(start_state)
        total_reward = .0
        ALPHA = alphas[e]
        agent = Agent(config, ACTIONS, model, valid_loader, criterion)

        for i in range(MAX_STEPS_PER_EPISODE):
            action = agent.choose_action(q_table, state)

            next_state, reward, done = agent.act(state, action)
            total_reward += reward

            if config['mdp']['Q_COMPUTATION'] == 'QL_M':
                # Q-Learning from Ghallab, Nau and Traverso
                q_value(q_table, state)[action] = q_value(q_table, state, action) + \
                    ALPHA * (reward + np.max(q_value(q_table, next_state)) - q_value(q_table, state, action))

            elif config['mdp']['Q_COMPUTATION'] == 'QL_WIKI':
                # Q-Learning as described on Wikipedia
                q_value(q_table, state)[action] = (1. - ALPHA) * q_value(q_table, state, action) + \
                    ALPHA * (reward + GAMMA * np.max(q_value(q_table, next_state)))

            del state
            state = next_state
            if done:
                break

        logging.info("Episode {}: reward type {}: total reward -> {}".format(
            e + 1, config['agent']['reward_type'], total_reward))

    #####################
    # Save the solution #
    #####################
    q_table_saver(q_table, config['sub_working_dir'], '/q_table.tsv')

    agent = Agent(config, ACTIONS, model, valid_loader, criterion)
    my_state = start_state
    result = []
    done = False
    while not done:
        sa = q_value(q_table, my_state)
        my_action = np.argmax(sa)
        action = my_state.environment[my_action]
        my_state, reward, done = agent.act(my_state, my_action)
        result.append([action, reward])

    final = pd.DataFrame(result, columns=['Action', 'Reward'])
    final.to_csv(config['sub_working_dir'] + '/actions_to_prune.tsv',
                 sep='\t',
                 index=False)
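
The q_value helper both update rules rely on is not listed. Its call pattern, returning a mutable row of action values when called without an action and a single value otherwise, suggests a lazily initialised table along these lines; keying states by their mask pattern is an assumption, as State's internals are not shown.

def q_value(q_table, state, action=None):
    # Hypothetical sketch: create a state's row of Q-values on first
    # access so that `q_value(q_table, state)[action] = ...` mutates
    # the table in place.
    key = str(state.masks)  # assumed hashable representation of the state
    if key not in q_table:
        q_table[key] = np.zeros(len(state.environment))
    return q_table[key] if action is None else q_table[key][action]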
Code example #5
def main():
    logging.basicConfig(level=logging.DEBUG,
                        format="[%(asctime)s %(filename)s] %(message)s")
    with open('params.json') as json_file:
        config = json.load(json_file)

    # Create sub_working_dir
    sub_working_dir = '{}/{}/try{}/{}'.format(
        config['working_dir'],
        config['model']['name'], 
        config['try'],
        # time.strftime("%Y%m%d%H%M%S", time.localtime())
        '{}_{}/{}/{}_{}'.format(
            time.strftime("%Y", time.localtime()),
            time.strftime("%m", time.localtime()),
            time.strftime("%d", time.localtime()),
            time.strftime("%H", time.localtime()),
            time.strftime("%M", time.localtime())  # was "%S"; hour_minute matches the layout used in example #4
        )
    )
    if not os.path.exists(sub_working_dir):
        os.makedirs(sub_working_dir)
    config["sub_working_dir"] = sub_working_dir
    logging.info("sub working dir: %s" % sub_working_dir)

    # Create the tensorboard SummaryWriter
    config["tensorboard_writer"] = SummaryWriter(sub_working_dir)
    logging.info("Please using 'python -m tensorboard.main --logdir={}'".format(sub_working_dir))

    # loading the dataset
    train_loader, valid_loader, test_loader = __load_data(config)

    # Creating the model
    model = NN(config['model']['architecture'], is_maskable=True)
    model = model.to(config['device'])

    # Getting the criterion, optimizer
    criterion = nn.CrossEntropyLoss()
    optimizer = get_optimizer(config, model)

    ###############
    # Begin Train #
    ###############
    train(model, train_loader, valid_loader, criterion, optimizer,
          config['train']['epochs'], config['train']['print_every'],
          config['device'])

    ##############
    # Begin Test #
    ##############
    # initialize lists to monitor test loss and accuracy
    test_loss = 0.0
    class_correct = list(0. for i in range(10))
    class_total = list(0. for i in range(10))

    model.eval() # prep model for evaluation

    for data, target in test_loader:
        data, target = data.to(config['device']), target.to(config['device'])

        # forward pass: compute predicted outputs by passing inputs to the model
        output = model(data)
        # calculate the loss
        loss = criterion(output, target)
        # update test loss 
        test_loss += loss.item()*data.size(0)
        # convert output probabilities to predicted class
        _, pred = torch.max(output, 1)
        # compare predictions to true label
        correct = np.squeeze(pred.eq(target.data.view_as(pred)))
        # calculate test accuracy for each object class
        for i in range(len(target)):
            label = target.data[i]
            class_correct[label] += correct[i].item()
            class_total[label] += 1

    # calculate and log the average test loss
    test_loss = test_loss/len(test_loader.sampler)
    logging.info('Test Loss: {:.6f}\n'.format(test_loss))

    for i in range(10):
        if class_total[i] > 0:
            logging.info('Test Accuracy of %5s: %2d%% (%2d/%2d)' % (
                str(i), 100 * class_correct[i] / class_total[i],
                class_correct[i], class_total[i]))
        else:
            logging.info('Test Accuracy of %5s: N/A (no training examples)' % str(i))  # `classes` is not defined in this script

    logging.info('Test Accuracy (Overall): %2d%% (%2d/%2d)' % (
        100. * np.sum(class_correct) / np.sum(class_total),
        np.sum(class_correct), np.sum(class_total)))    
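
__load_data returns one loader in example #1, two in examples #3 and #4, and three in examples #5 and #6, so several variants presumably exist. Below is a sketch of the three-loader form, assuming MNIST (the digit-indexed class names in examples #1, #3 and #5 point that way, while #6 explicitly targets CIFAR-10) and a sampler-based validation split, which matches the len(loader.sampler) normalisation used in the evaluation loops.

import numpy as np
from torch.utils.data import DataLoader, SubsetRandomSampler
from torchvision import datasets, transforms

def __load_data(config):
    # Hypothetical sketch; dataset, batch size and 90/10 split are assumptions.
    transform = transforms.ToTensor()
    train_set = datasets.MNIST('data', train=True, download=True, transform=transform)
    test_set = datasets.MNIST('data', train=False, download=True, transform=transform)

    indices = np.random.permutation(len(train_set))
    split = int(0.9 * len(train_set))
    train_loader = DataLoader(train_set, batch_size=64,
                              sampler=SubsetRandomSampler(indices[:split]))
    valid_loader = DataLoader(train_set, batch_size=64,
                              sampler=SubsetRandomSampler(indices[split:]))
    test_loader = DataLoader(test_set, batch_size=64)
    return train_loader, valid_loader, test_loader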
Code example #6
def main():
    logging.basicConfig(level=logging.DEBUG,
                        format='[%(asctime)s %(filename)s] %(message)s')
    with open('params.json') as json_file:
        config = json.load(json_file)

    args = __create_args()
    config = __adjust_config(config, args)

    # loading the dataset
    train_loader, valid_loader, test_loader = __load_data(config)

    # Creating the model
    model = NN(config['model']['architecture'], is_maskable=True)
    model = model.to(config['device'])
    # Reusing the mask
    for (_, _, files) in os.walk(config['model_path']):
        file = [s for s in files if s.endswith('.pt')]
    reused = NN(config['model']['architecture'], is_maskable=True)
    checkpoint = torch.load(config['model_path'] + '/' + file[0], map_location=config['device'])
    logging.info('Loading checkpoint: {}'.format(config['model_path'] + '/' + file[0]))
    reused.load_state_dict(checkpoint)

    model.masks = reused.masks
    model.masks = model.masks.to(config['device'])
    total_weights = 0
    remaining_weights = 0
    for mask in model.masks:
        total_weights += np.prod(mask.weight.shape)
        remaining_weights += torch.sum(mask.weight).item()
    logging.info('Total weights: {}\tRemaining weights: {}'.format(total_weights, remaining_weights))
    
    # Getting the criterion, optimizer
    criterion = nn.CrossEntropyLoss()
    optimizer = get_optimizer(config, model)
    
    
    for e in range(config['epochs']):
        #########
        # Train #
        #########
        model.train()
        train_loss = .0
        train_correct = .0
        train_total = .0
        for data, target in train_loader:
            data, target = data.to(config['device']), target.to(config['device'])

            # clear the gradients accumulated by the previous batch
            optimizer.zero_grad()
            # forward pass: compute predicted outputs by passing inputs to the model
            output = model(data)
            # calculate the loss
            loss = criterion(output, target)
            loss.backward()
            optimizer.step()
            # update train loss
            train_loss += loss.item()*data.size(0)
            # convert output probabilities to predicted class
            _, pred = torch.max(output, 1)
            # compare predictions to true label
            correct = pred.eq(target.view_as(pred))
            # accumulate the number of correct predictions (np.sum fails on CUDA tensors)
            train_correct += correct.sum().item()
            train_total += data.shape[0]

        ############
        # Validate #
        ############
        model.eval()
        valid_loss = .0
        valid_correct = .0
        valid_total = .0
        for data, target in valid_loader:
            data, target = data.to(config['device']), target.to(config['device'])

            # forward pass: compute predicted outputs by passing inputs to the model
            output = model(data)
            # calculate the loss
            loss = criterion(output, target)
            # update valid loss 
            valid_loss += loss.item()*data.size(0)
            # convert output probabilities to predicted class
            _, pred = torch.max(output, 1)
            # compare predictions to true label
            correct = pred.eq(target.view_as(pred))
            # accumulate the number of correct predictions
            valid_correct += correct.sum().item()
            valid_total += data.shape[0]
        
        logging.info('Train Loss: {:.6f} Train Accuracy: {:.4f}\tValid Loss: {:.6f} Valid Accuracy: {:.4f}'.format(
            train_loss/train_total, train_correct/train_total,
            valid_loss/valid_total, valid_correct/valid_total
        ))

    checkpoint = config['model_path'] + '/model-on-CIFAR10__optimizer-{}__train-{}.pth'.format(
        config['optimizer']['type'],
        config['epochs']
    )
    torch.save(model.state_dict(), checkpoint)
    logging.info("Model checkpoint saved to %s" % checkpoint)


    ######################
    # Evaluate the model #
    ######################
    test_loss = 0.0
    classes = ('plane', 'car', 'bird', 'cat',
           'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
    class_correct = list(0. for i in range(10))
    class_total = list(0. for i in range(10))
    model.eval() # prep model for evaluation
    for data, target in test_loader:
        data, target = data.to(config['device']), target.to(config['device'])

        # forward pass: compute predicted outputs by passing inputs to the model
        output = model(data)
        # calculate the loss
        loss = criterion(output, target)
        # update test loss 
        test_loss += loss.item()*data.size(0)
        # convert output probabilities to predicted class
        _, pred = torch.max(output, 1)
        # compare predictions to true label
        correct = np.squeeze(pred.eq(target.data.view_as(pred)))
        # calculate test accuracy for each object class
        for i in range(len(target)):
            label = target.data[i]
            class_correct[label] += correct[i].item()
            class_total[label] += 1

    # calculate and print avg test loss
    test_loss = test_loss/len(test_loader.sampler)
    logging.info('Test Loss: {:.6f}\n'.format(test_loss))

    results = []
    for i in range(10):
        if class_total[i] > 0:
            logging.info('Test Accuracy of %5s: %.5f%% (%2d/%2d)' % (
                classes[i], 100 * class_correct[i] / class_total[i],
                class_correct[i], class_total[i]))
            results.append([classes[i], 100 * class_correct[i] / class_total[i],
                class_correct[i], class_total[i]])
        else:
            logging.info('Test Accuracy of %5s: N/A (no training examples)' % (classes[i]))

    logging.info('Test Accuracy (Overall): %2d%% (%2d/%2d)' % (
        100. * np.sum(class_correct) / np.sum(class_total),
        np.sum(class_correct), np.sum(class_total)))

    results = pd.DataFrame(results)
    results.to_csv(
        config['model_path'] + '/model-on-CIFAR10__Accuracy__Model-train-{}.tsv'.format(
            config['epochs']
        ),
        index=False,
        header=['Class', 'Accuracy', 'Right_Instances', 'Total_Instances'],
        sep='\t'
    )
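
Finally, get_optimizer(config, model) is shared by examples #3 to #6, but only config['optimizer']['type'] is visible (in the checkpoint name above). A minimal factory consistent with that usage might look as follows; every key other than 'type' is an assumption.

def get_optimizer(config, model):
    # Hypothetical sketch; only the 'type' key is confirmed by the scripts.
    opt = config['optimizer']
    if opt['type'] == 'SGD':
        return torch.optim.SGD(model.parameters(), lr=opt.get('lr', 0.01),
                               momentum=opt.get('momentum', 0.9))
    if opt['type'] == 'Adam':
        return torch.optim.Adam(model.parameters(), lr=opt.get('lr', 0.001))
    raise ValueError('Unknown optimizer type: {}'.format(opt['type']))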