def evaluate(model, generator, data_type, devices, max_iteration, cuda):
    """Evaluate
    
    Args:
      model: object.
      generator: object.
      data_type: 'train' | 'validate'.
      devices: list of devices, e.g. ['a'] | ['a', 'b', 'c']
      max_iteration: int, maximum iteration for validation
      cuda: bool.
      
    Returns:
      accuracy: float
      loss: float
      accuracy_device: float
      loss_device: float
    """

    # Generate function
    generate_func = generator.generate_validate(data_type=data_type,
                                                devices=devices,
                                                shuffle=True,
                                                max_iteration=max_iteration)

    # Forward
    results = forward(model=model,
                      generate_func=generate_func,
                      cuda=cuda,
                      return_target=True)

    outputs_device = results['output_device']  # (audios_num, devices_num)
    device_targets = results['device']  # (audios_num,)
    outputs = results['output']  # (audios_num, classes_num)
    targets = results['target']  # (audios_num,)

    predictions = np.argmax(outputs, axis=-1)  # (audios_num,)
    predictions_device = np.argmax(outputs_device, axis=-1)  # (audios_num,)

    # Evaluate
    classes_num = outputs.shape[-1]
    devices_num = outputs_device.shape[-1]

    loss = F.nll_loss(torch.Tensor(outputs),
                      torch.LongTensor(targets)).item()

    loss_device = F.nll_loss(torch.Tensor(outputs_device),
                             torch.LongTensor(device_targets)).item()

    confusion_matrix = calculate_confusion_matrix(targets, predictions,
                                                  classes_num)

    accuracy = calculate_accuracy(targets,
                                  predictions,
                                  classes_num,
                                  average='macro')
    accuracy_device = calculate_accuracy(device_targets,
                                         predictions_device,
                                         devices_num,
                                         average='macro')

    return accuracy, loss, accuracy_device, loss_device
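None of these snippets define calculate_accuracy; each project ships its own helper. A minimal sketch consistent with the (targets, predictions, classes_num, average='macro') call above, shown only for orientation and not taken from the source project:

import numpy as np

def calculate_accuracy(targets, predictions, classes_num, average=None):
    # targets, predictions: (audios_num,) arrays of class indices
    total = np.zeros(classes_num)
    correctness = np.zeros(classes_num)
    for target, prediction in zip(targets, predictions):
        total[target] += 1
        correctness[target] += (target == prediction)
    class_wise_accuracy = correctness / np.maximum(total, 1)
    if average == 'macro':
        return float(np.mean(class_wise_accuracy))
    return class_wise_accuracy  # one accuracy value per class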
Example #2
    def fit(self, train_data, batch_sz):

        dataset, capacity = make_dataset(train_data, batch_sz)
        dataset = dataset.shuffle(buffer_size=100)
        dataset = dataset.batch(batch_sz)
        dataset = dataset.repeat(5)

        iterator = dataset.make_one_shot_iterator()

        next_examples, next_labels = iterator.get_next()

        logits, predicted_classes = self.forward_classifier(next_examples)

        accuracy = calculate_accuracy(predicted_classes, next_labels)

        loss = tf.losses.softmax_cross_entropy(onehot_labels=next_labels, logits=logits)

        self.global_step = tf.train.get_or_create_global_step()

        train_op = tf.train.AdamOptimizer().minimize(loss, global_step=self.global_step)

        if not os.path.exists('tmp/'):
            os.makedirs('tmp/')

        with tf.train.MonitoredTrainingSession(hooks=[self.hook], config=self.config) as sess:
            while not sess.should_stop():
                # Run the train op and the accuracy op together so both see
                # the same batch; separate sess.run() calls would each pull a
                # fresh batch from the one-shot iterator.
                _, acc = sess.run([train_op, accuracy])
                print("Train accuracy: " + str(acc))
Example #3
def evaluate(model, generator, data_type, max_iteration, cuda):
    """Evaluate. 
    
    Args:
      model: object
      generator: object
      data_type: string, 'train' | 'validate'
      max_iteration: int, maximum iteration for validation
      cuda: bool
      
    Returns:
      accuracy: float
      auc: float
    """

    generate_func = generator.generate_validate(data_type=data_type,
                                                shuffle=True,
                                                max_iteration=max_iteration)

    # Inference
    results = forward(model=model,
                      generate_func=generate_func,
                      cuda=cuda,
                      return_target=True,
                      return_bottleneck=False)

    outputs = results['output']  # (audios_num, classes_num)
    targets = results['target']  # (audios_num, classes_num)

    # Evaluate
    accuracy = calculate_accuracy(targets, outputs)
    auc = calculate_auc(targets, outputs)

    return accuracy, auc
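calculate_auc is likewise project-specific. Since targets and outputs here are (audios_num, classes_num) matrices, a plausible sketch is a thin wrapper over scikit-learn (an assumption, not the repository's actual code):

from sklearn.metrics import roc_auc_score

def calculate_auc(targets, outputs, average='macro'):
    # targets: binary indicator matrix, outputs: predicted scores,
    # both shaped (audios_num, classes_num)
    return roc_auc_score(targets, outputs, average=average)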
Example #4
def evaluate(
    model,
    generator,
    data_type,
    devices,
    max_iteration,
):
    """Evaluate

    Args:
      model: object.
      generator: object.
      data_type: 'train' | 'validate'.
      devices: list of devices, e.g. ['a'] | ['a', 'b', 'c']
      max_iteration: int, maximum iteration for validation

    Returns:
      accuracy: float
      loss: float
    """

    # Generate function
    generate_func = generator.generate_validate(data_type=data_type,
                                                devices=devices,
                                                shuffle=True,
                                                max_iteration=max_iteration)

    # Forward
    results = forward(model=model,
                      generate_func=generate_func,
                      return_target=True)

    outputs = results['output']  # (audios_num, classes_num)
    targets = results['target']  # (audios_num, classes_num)

    predictions = np.argmax(outputs, axis=-1)  # (audios_num,)

    # Evaluate
    classes_num = outputs.shape[-1]

    # categorical_crossentropy must be paired with softmax; binary_crossentropy with sigmoid
    loss = K.mean(
        keras.metrics.categorical_crossentropy(K.constant(targets),
                                               K.constant(outputs)))
    loss = K.eval(loss)

    # confusion_matrix = calculate_confusion_matrix(
    #     targets, predictions, classes_num)

    targets = np.argmax(targets, axis=-1)

    accuracy = calculate_accuracy(targets,
                                  predictions,
                                  classes_num,
                                  average='macro')
    return accuracy, loss
Example #5
def evaluate(data, model, device):
    with torch.no_grad():
        data_loader = DataLoader(data, batch_size=512)
        model.to(device)
        model.eval()
        total_accuracy = 0
        for index, batch in enumerate(data_loader):
            predicted, labels = perform_forward_loop(model, batch, device)
            total_accuracy += calculate_accuracy(predicted, labels)
        return total_accuracy / (index + 1)
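In this snippet calculate_accuracy is called once per batch with predicted class indices and ground-truth labels. A minimal PyTorch sketch of such a helper (assumed, not from the source project):

import torch

def calculate_accuracy(predicted, labels):
    # fraction of correct predictions in a single batch
    return (predicted == labels).float().mean().item()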
Example #6
def inference_validation(args):

    # Arguments & parameters
    workspace = args.workspace
    holdout_fold = args.holdout_fold
    iteration = args.iteration
    filename = args.filename
    cuda = args.cuda

    validate = True

    # Paths
    hdf5_path = os.path.join(workspace, 'features', 'logmel', 'development.h5')

    validation_csv = os.path.join(workspace, 'validation.csv')

    model_path = os.path.join(workspace, 'models', filename,
                              'holdout_fold{}'.format(holdout_fold),
                              'md_{}_iters.tar'.format(iteration))

    # Load model
    model = Model()
    checkpoint = torch.load(model_path)
    model.load_state_dict(checkpoint['state_dict'])

    if cuda:
        model.cuda()

    # Data generator
    generator = DataGenerator(hdf5_path=hdf5_path,
                              batch_size=batch_size,
                              validation_csv=validation_csv,
                              holdout_fold=holdout_fold)

    generate_func = generator.generate_validate(data_type='validate',
                                                shuffle=False,
                                                max_iteration=None)

    # Inference
    results = forward(model=model,
                      generate_func=generate_func,
                      cuda=cuda,
                      return_target=True,
                      return_bottleneck=False)

    outputs = results['output']
    targets = results['target']
    itemids = results['itemid']

    # Evaluate
    va_acc = calculate_accuracy(targets, outputs)
    va_auc = calculate_auc(targets, outputs)

    logging.info('va_acc: {:.3f}, va_auc: {:.3f}'.format(va_acc, va_auc))
Example #7
def evaluate(model, generator, data_type, cuda):
    """Evaluate
    
    Args:
      model: object.
      generator: object.
      data_type: 'train' | 'validate'.
      cuda: bool.
      
    Returns:
      accuracy: float
      mapk: float
    """

    if data_type == 'train':
        max_audios_num = 1000  # A small portion of training data to evaluate

    elif data_type == 'validate':
        max_audios_num = None  # All evaluation data to evaluate

    generate_func = generator.generate_validate_slices(
        data_type=data_type,
        manually_verified_only=True,
        shuffle=True,
        max_audios_num=max_audios_num)

    # Forward
    results = forward(model=model,
                      generate_func=generate_func,
                      cuda=cuda,
                      return_target=True)

    outputs = results['output']  # (audios_num, patches_num, classes_num)
    targets = results['target']  # (audios_num,)

    agg_outputs = aggregate_outputs(outputs)
    '''(audios_num, classes_num)'''

    predictions = np.argmax(agg_outputs, axis=-1)
    '''(audios_num,)'''

    sorted_indices = np.argsort(agg_outputs, axis=-1)[:, ::-1][:, :kmax]
    '''(audios_num, kmax)'''

    # Accuracy
    accuracy = calculate_accuracy(predictions, targets)

    # mAP
    mapk_value = mapk(actual=[[e] for e in targets],
                      predicted=[e.tolist() for e in sorted_indices],
                      k=kmax)

    return accuracy, mapk_value
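mapk follows the usual mean-average-precision-at-k definition. The sketch below mirrors the widely used ml_metrics implementation, which the project may in fact import directly:

import numpy as np

def apk(actual, predicted, k=10):
    # average precision at k for a single example
    if len(predicted) > k:
        predicted = predicted[:k]
    score, num_hits = 0.0, 0.0
    for i, p in enumerate(predicted):
        if p in actual and p not in predicted[:i]:
            num_hits += 1.0
            score += num_hits / (i + 1.0)
    if not actual:
        return 0.0
    return score / min(len(actual), k)

def mapk(actual, predicted, k=10):
    # mean of apk over all examples
    return np.mean([apk(a, p, k) for a, p in zip(actual, predicted)])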
Example #8
def model_validate(classifier, class_wise_accuracy=False, plot_confusion_matrix=False):
    x_val, y_val = prepare_data(datatype='validate')
    if class_wise_accuracy:
        predict = classifier.predict(x_val)
        if plot_confusion_matrix:
            cm = calculate_confusion_matrix(y_val, predict, 10)
            plot_confusion_matrix2(cm, "svm", cfg.labels)
        class_wise_accuracy = calculate_accuracy(y_val, predict, 10)
        print_accuracy(class_wise_accuracy, cfg.labels)
        score = np.mean(class_wise_accuracy)
    else:
        score = classifier.score(x_val, y_val)
        print('The accuracy of validation: {:.4f}'.format(score))
    return score
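print_accuracy receives per-class accuracies plus the class labels; a minimal sketch with hypothetical formatting:

import numpy as np

def print_accuracy(class_wise_accuracy, labels):
    # one line per class, then the macro average
    for accuracy, label in zip(class_wise_accuracy, labels):
        print('{:<30}{:.3f}'.format(label, accuracy))
    print('{:<30}{:.3f}'.format('Average', np.mean(class_wise_accuracy)))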
Example #9
    def evaluate(self, test_data, batch_sz):

        dataset, capacity = make_dataset(test_data, batch_sz)
        dataset = dataset.batch(batch_sz)

        self.hook.is_training = False
        iterator = dataset.make_one_shot_iterator()
        next_examples, next_labels = iterator.get_next()

        logits, predicted_classes = self.forward_classifier(next_examples)
        accuracy = calculate_accuracy(predicted_classes, next_labels)

        with tf.train.MonitoredTrainingSession(hooks=[self.hook]) as sess:

            while not sess.should_stop():
                acc = sess.run(accuracy)
                print("Test accuracy: " + str(acc))
Example #10
def stack_two_stage():
    X, y = prepare_data('train')
    X_predict, y_predict = prepare_data('validate')
    dataset_blend_train = np.load('data.npz.npy')
    dataset_blend_test = np.load('label.npz.npy')

    clf = GradientBoostingClassifier(learning_rate=0.005,
                                     subsample=0.8,
                                     random_state=10,
                                     n_estimators=600)
    clf.fit(dataset_blend_train, y)
    y_submission = clf.predict(dataset_blend_test)

    # print("Linear stretch of predictions to [0,1]")
    # y_submission = (y_submission - y_submission.min()) / (y_submission.max() - y_submission.min())
    print("blend result")
    print("auc Score: %f" % (calculate_accuracy(
        y_predict, y_submission, classes_num=10, average='macro')))
Example #11
def evaluate(model, generator, data_type, max_iteration, cuda):
    """Evaluate. 
    
    Args:
      model: object
      generator: object
      data_type: string, 'train' | 'validate'
      max_iteration: int, maximum iteration for validation
      cuda: bool
      
    Returns:
      accuracy: float
      f1_score: float
      loss: float
    """

    generate_func = generator.generate_validate(data_type=data_type,
                                                shuffle=True,
                                                max_iteration=max_iteration)

    # Inference
    results = forward(model=model,
                      generate_func=generate_func,
                      cuda=cuda,
                      return_target=True)

    outputs = results['output']  # (audios_num, classes_num)
    targets = results['target']  # (audios_num,)

    # Metrics
    loss = F.nll_loss(torch.Tensor(outputs), torch.LongTensor(targets)).item()

    predictions = np.argmax(outputs, axis=-1)

    accuracy = calculate_accuracy(targets, predictions)

    f1_score = calculate_f1_score(targets, predictions, average='macro')

    return accuracy, f1_score, loss
Example #12
def evaluate(model, generator, data_type, max_iteration, cuda):
    """Evaluate
    
    Args:
      model: object.
      generator: object.
      data_type: 'train' | 'validate'.
      max_iteration: int, maximum iteration for validation
      cuda: bool.
      
    Returns:
      accuracy: float
      loss: float
    """

    # Generate function
    generate_func = generator.generate_validate(data_type=data_type,
                                                shuffle=True,
                                                max_iteration=max_iteration)

    # Forward
    results = forward(model=model,
                      generate_func=generate_func,
                      cuda=cuda,
                      return_target=True)

    outputs = results['output']  # (audios_num, classes_num)
    targets = results['target']  # (audios_num,)

    predictions = np.argmax(outputs, axis=-1)  # (audios_num,)

    # Evaluate
    classes_num = outputs.shape[-1]

    loss = F.nll_loss(torch.Tensor(outputs), torch.LongTensor(targets)).item()

    accuracy = calculate_accuracy(targets, predictions)

    return accuracy, loss
Example #13
def inference_validation_data(args):

    # Arguments & parameters
    dataset_dir = args.dataset_dir
    subdir = args.subdir
    workspace = args.workspace
    holdout_fold = args.holdout_fold
    iteration = args.iteration
    filename = args.filename
    cuda = args.cuda

    labels = config.labels

    if 'mobile' in subdir:
        devices = ['a', 'b', 'c']
    else:
        devices = ['a']

    validation = True
    classes_num = len(labels)

    # Paths
    hdf5_path = os.path.join(workspace, 'features', 'logmel', subdir,
                             'development.h5')

    dev_train_csv = os.path.join(dataset_dir, subdir, 'evaluation_setup',
                                 'fold1_train.txt')

    dev_validate_csv = os.path.join(dataset_dir, subdir, 'evaluation_setup',
                                    'fold{}_evaluate.txt'.format(holdout_fold))

    model_path = os.path.join(workspace, 'models', subdir, filename,
                              'holdout_fold={}'.format(holdout_fold),
                              'md_{}_iters.tar'.format(iteration))

    # Load model
    model = Model(classes_num)
    checkpoint = torch.load(model_path)
    model.load_state_dict(checkpoint['state_dict'])

    if cuda:
        model.cuda()

    # Predict & evaluate
    for device in devices:

        print('Device: {}'.format(device))

        # Data generator
        generator = DataGenerator(hdf5_path=hdf5_path,
                                  batch_size=batch_size,
                                  dev_train_csv=dev_train_csv,
                                  dev_validate_csv=dev_validate_csv)

        generate_func = generator.generate_validate(data_type='validate',
                                                    devices=device,
                                                    shuffle=False)
        # Inference
        results = forward(model=model,
                          generate_func=generate_func,
                          cuda=cuda,
                          return_target=True)

        outputs = results['output']  # (audios_num, classes_num)
        targets = results['target']  # (audios_num,)
        predictions = np.argmax(outputs, axis=-1)
        classes_num = outputs.shape[-1]

        # Evaluate
        confusion_matrix = calculate_confusion_matrix(targets, predictions,
                                                      classes_num)

        class_wise_accuracy = calculate_accuracy(targets, predictions,
                                                 classes_num)

        # Print
        print_accuracy(class_wise_accuracy, labels)
        print('confusion_matrix: \n', confusion_matrix)
    logging.info('confusion_matrix: \n{}'.format(confusion_matrix))
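calculate_confusion_matrix can be reconstructed from how it is called: integer targets, integer predictions, and a class count. A straightforward sketch (an assumption about the helper, not the project's exact code):

import numpy as np

def calculate_confusion_matrix(targets, predictions, classes_num):
    # confusion_matrix[i, j] counts samples of true class i predicted as class j
    confusion_matrix = np.zeros((classes_num, classes_num), dtype=np.int64)
    for target, prediction in zip(targets, predictions):
        confusion_matrix[target, prediction] += 1
    return confusion_matrix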
Example #14
def inference_validation_data(args):
    # Arguments & parameters
    dataset_dir = args.dataset_dir
    subdir = args.subdir
    workspace = args.workspace
    holdout_fold = args.holdout_fold
    iteration = args.iteration
    filename = args.filename

    # data_type = args.data_type

    labels = config.labels

    if 'mobile' in subdir:
        devices = ['a', 'b', 'c']
    else:
        devices = ['a']

    # Paths
    hdf5_path = os.path.join(workspace, 'features', 'logmel', subdir,
                             'development_hpss_lrad.h5')

    dev_train_csv = os.path.join(dataset_dir, subdir, 'evaluation_setup',
                                 'fold{}_train.txt'.format(holdout_fold))

    dev_validate_csv = os.path.join(dataset_dir, subdir, 'evaluation_setup',
                                    'fold{}_evaluate.txt'.format(holdout_fold))

    # model_path = os.path.join(workspace, 'models', subdir, filename,
    #                           'holdout_fold={}'.format(holdout_fold),
    #                           'md_{}_iters_max_attention2_2019-05-31 10:33:46.h5'.format(iteration))
    # model_path = os.path.join(workspace, 'appendixes',
    #                           'md_{}_iters_max_attention2_2019-05-31 00:48:09.h5'.format(iteration))

    model_path = '/home/r506/Downloads/dcase2018_task1-master/models/' \
                 'TUT-urban-acoustic-scenes-2018-development/main_keras/' \
                 'new/DAN-DFF/md_9700_iters_max_attention2_78.1.h5'
    model = keras.models.load_model(model_path)

    # Predict & evaluate
    for device in devices:
        print('Device: {}'.format(device))
        # Data generator
        generator = DataGenerator(hdf5_path=hdf5_path,
                                  batch_size=batch_size,
                                  dev_train_csv=dev_train_csv,
                                  dev_validate_csv=dev_validate_csv)

        generate_func = generator.generate_validate(data_type='validate',
                                                    devices=device,
                                                    shuffle=False)

        # Inference
        results = forward(model=model,
                          generate_func=generate_func,
                          return_target=True)

        outputs = results['output']  # (audios_num, classes_num)
        targets = results['target']  # (audios_num, classes_num)

        # Multi-class cross entropy
        predictions = np.argmax(outputs, axis=-1)

        classes_num = outputs.shape[-1]

        # Evaluate
        targets = np.argmax(targets, axis=-1)
        confusion_matrix = calculate_confusion_matrix(targets, predictions,
                                                      classes_num)

        class_wise_accuracy = calculate_accuracy(targets, predictions,
                                                 classes_num)

        # Print
        print_accuracy(class_wise_accuracy, labels)
        print('confusion_matrix: \n', confusion_matrix)

        # Plot confusion matrix
        # plot_confusion_matrix(
        #     confusion_matrix,
        #     title='Device {}'.format(device.upper()),
        #     labels=labels,
        #     values=class_wise_accuracy)
        np.save('data5', confusion_matrix)
        plot_confusion_matrix2(
            confusion_matrix,
            title='The best performance of the proposed DAN-DFF method',
            labels=labels,
        )
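plot_confusion_matrix2 is only ever given a matrix, a title, and class labels, so a matplotlib heatmap is the natural reading; a minimal sketch in which all styling choices are guesses:

import matplotlib.pyplot as plt

def plot_confusion_matrix2(confusion_matrix, title, labels):
    # heatmap with class labels on both axes
    fig, ax = plt.subplots(figsize=(8, 8))
    im = ax.imshow(confusion_matrix, cmap='Blues')
    ax.set_xticks(range(len(labels)))
    ax.set_yticks(range(len(labels)))
    ax.set_xticklabels(labels, rotation=90)
    ax.set_yticklabels(labels)
    ax.set_title(title)
    fig.colorbar(im)
    fig.tight_layout()
    plt.show()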
Example #15
def inference_validation_data(args):

    # Arguments & parameters
    dataset_dir = args.dataset_dir
    workspace = args.workspace
    holdout_fold = args.holdout_fold
    iteration = args.iteration
    cuda = args.cuda
    filename = args.filename

    validation = True
    labels = config.labels
    classes_num = len(config.labels)

    # Paths
    hdf5_path = os.path.join(workspace, 'features', 'logmel', 'development.h5')

    train_txt = os.path.join(dataset_dir, 'evaluation_setup',
                             'fold{}_train.txt'.format(holdout_fold))

    validate_txt = os.path.join(dataset_dir, 'evaluation_setup',
                                'fold{}_evaluate.txt'.format(holdout_fold))

    model_path = os.path.join(workspace, 'models', filename,
                              'holdout_fold{}'.format(holdout_fold),
                              'md_{}_iters.tar'.format(iteration))

    # Model
    model = Model(classes_num)

    checkpoint = torch.load(model_path)
    model.load_state_dict(checkpoint['state_dict'])

    if cuda:
        model.cuda()

    # Data generator
    generator = DataGenerator(hdf5_path=hdf5_path,
                              batch_size=batch_size,
                              train_txt=train_txt,
                              validate_txt=validate_txt)

    generate_func = generator.generate_validate(data_type='validate',
                                                shuffle=False,
                                                max_iteration=None)

    # Inference
    inference_time = time.time()

    results = forward(model=model,
                      generate_func=generate_func,
                      cuda=cuda,
                      return_target=True)

    outputs = results['output']
    targets = results['target']
    audio_names = results['audio_name']

    logging.info('Inference time: {:.4f} s'.format(time.time() -
                                                   inference_time))

    predictions = np.argmax(outputs, axis=-1)

    # Calculate statistics
    accuracy = calculate_accuracy(targets, predictions)

    class_wise_f1_score = calculate_f1_score(targets,
                                             predictions,
                                             average=None)

    # Print statistics
    logging.info('Averaged accuracy: {:.4f}'.format(accuracy))

    logging.info('{:<30}{}'.format('class_name', 'f1_score'))
    logging.info('---------------------------------------')

    for (n, lb) in enumerate(labels):
        logging.info('{:<30}{:.4f}'.format(lb, class_wise_f1_score[n]))

    logging.info('---------------------------------------')
    logging.info('{:<30}{:.4f}'.format('Average',
                                       np.mean(class_wise_f1_score)))
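calculate_f1_score with average=None returns one score per class (it is indexed per label in the loop above), which matches scikit-learn's f1_score; a thin wrapper like this would fit (assumed implementation):

from sklearn.metrics import f1_score

def calculate_f1_score(targets, predictions, average=None):
    # average=None -> per-class F1 array; 'macro' -> single averaged float
    return f1_score(targets, predictions, average=average)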
Example #16
def inference_validation_data(args):

    # Arguments & parameters
    dataset_dir = args.dataset_dir
    subdir = args.subdir
    workspace = args.workspace
    holdout_fold = args.holdout_fold
    iteration = args.iteration
    filename = args.filename
    cuda = args.cuda

    labels = config.labels

    if 'mobile' in subdir:
        devices = ['a', 'b', 'c']
    else:
        devices = ['a']

    validation = True
    classes_num = len(labels)
    devices_num = len(devices)

    # Paths
    hdf5_path = os.path.join(workspace, 'features', 'logmel', subdir,
                             'development.h5')

    dev_train_csv = os.path.join(dataset_dir, subdir, 'evaluation_setup',
                                 'fold1_train.csv')

    dev_validate_csv = os.path.join(dataset_dir, subdir, 'evaluation_setup',
                                    'fold{}_evaluate.csv'.format(holdout_fold))

    model_path = os.path.join(workspace, 'models', subdir, filename,
                              'holdout_fold={}'.format(holdout_fold),
                              'md_{}_iters.tar'.format(iteration))

    # Load model
    model = Model(classes_num, devices_num, cond_layer)
    checkpoint = torch.load(model_path)
    model.load_state_dict(checkpoint['state_dict'])

    if cuda:
        model.cuda()

    # Predict & evaluate
    for device in devices:

        print('Device: {}'.format(device))

        # Data generator
        generator = DataGenerator(hdf5_path=hdf5_path,
                                  batch_size=batch_size,
                                  dev_train_csv=dev_train_csv,
                                  dev_validate_csv=dev_validate_csv)

        generate_func = generator.generate_validate(data_type='validate',
                                                    devices=device,
                                                    shuffle=False)

        # Inference
        results = forward(model=model,
                          generate_func=generate_func,
                          cuda=cuda,
                          return_target=True)

        outputs = results['output']  # (audios_num, classes_num)
        targets = results['target']  # (audios_num,)

        #	(outputs, targets, audio_names, outputs_heatmap) = forward_heatmap(model=model,###################
        #                                   generate_func=generate_func,
        #                                   cuda=cuda,
        #                                   has_target=True)

        predictions = np.argmax(outputs, axis=-1)

        classes_num = outputs.shape[-1]

        ##########################################################################
        #        heatmaps = []
        #        classes = []
        #        for i in range(0, len(predictions)):
        #            pred_num = predictions[i]
        #            if pred_num == targets[i]:
        #                if not (pred_num in classes):
        #                    classes.append(pred_num)
        #                    print 'classes:'
        #                    print classes
        #                    logging.info('\n')
        #                    logging.info(outputs_heatmap[i][pred_num])
        #                    logging.info('class num: ')
        #                    logging.info(pred_num)
        #                    heatmaps.append(outputs_heatmap[i][pred_num])

        ############################################################################

        # Evaluate
        confusion_matrix = calculate_confusion_matrix(targets, predictions,
                                                      classes_num)

        class_wise_accuracy = calculate_accuracy(targets, predictions,
                                                 classes_num)

        # save
        # np.save(os.path.join(workspace, 'logs', 'main_pytorch', str(device)+"heatmap.npy"),heatmaps)##############
        # np.save(os.path.join(workspace, 'logs', 'main_pytorch', str(device)+"confusionMat.npy"),confusion_matrix)#########

        # Print
        print_accuracy(class_wise_accuracy, labels)
        print('confusion_matrix: \n', confusion_matrix)
        logging.info('confusion_matrix: \n{}'.format(confusion_matrix))
Example #17
def inference_validation_data(args):

    # Arguments & parameters
    workspace = args.workspace
    holdout_fold = args.holdout_fold
    iteration = args.iteration
    filename = args.filename
    cuda = args.cuda

    classes_num = len(config.labels)

    # Paths
    model_path = os.path.join(workspace, 'models', filename,
                              'holdout_fold{}'.format(holdout_fold),
                              'md_{}_iters.tar'.format(iteration))

    hdf5_path = os.path.join(workspace, 'features', 'logmel', 'development.h5')

    validation_csv = os.path.join(workspace, 'validate_meta.csv')

    stats_pickle_path = os.path.join(workspace, 'stats', filename,
                                     'holdout_fold{}'.format(holdout_fold),
                                     '{}_iters.p'.format(iteration))

    create_folder(os.path.dirname(stats_pickle_path))

    # Model
    model = Model(classes_num)

    checkpoint = torch.load(model_path)
    model.load_state_dict(checkpoint['state_dict'])

    if cuda:
        model.cuda()

    # Data generator
    generator = DataGenerator(hdf5_path=hdf5_path,
                              batch_size=batch_size,
                              time_steps=time_steps,
                              validation_csv=validation_csv,
                              holdout_fold=holdout_fold)

    generate_func = generator.generate_validate_slices(
        data_type='validate',
        manually_verified_only=True,
        shuffle=False,
        max_audios_num=None)

    # Forward
    results = forward(model=model,
                      generate_func=generate_func,
                      cuda=cuda,
                      return_target=True)

    outputs = results['output']  # (audios_num, patches_num, classes_num)
    targets = results['target']  # (audios_num,)

    agg_outputs = aggregate_outputs(outputs)
    '''(audios_num, classes_num)'''

    predictions = np.argmax(agg_outputs, axis=-1)
    '''(audios_num,)'''

    sorted_indices = np.argsort(agg_outputs, axis=-1)[:, ::-1][:, :kmax]
    '''(audios_num, kmax)'''

    # Accuracy
    accuracy = calculate_accuracy(predictions, targets)

    # mAP
    mapk_value = mapk(actual=[[e] for e in targets],
                      predicted=[e.tolist() for e in sorted_indices],
                      k=kmax)

    # Print
    logging.info('')
    logging.info('iteration: {}'.format(iteration))
    logging.info('accuracy: {:.3f}'.format(accuracy))
    logging.info('mapk: {:.3f}'.format(mapk_value))

    (class_wise_accuracy, correctness,
     total) = print_class_wise_accuracy(predictions, targets)

    # Save stats for current holdout training
    stats = {
        'correctness': correctness,
        'total': total,
        'accuracy': accuracy,
        'mapk': mapk_value
    }

    with open(stats_pickle_path, 'wb') as f:
        pickle.dump(stats, f)

    logging.info('Write out stats to {}'.format(stats_pickle_path))
Example #18
def main():
    args = parse_args()

    ## Create an output dir
    output_dir_path = args.od + args.en
    if not os.path.exists(output_dir_path):
        os.makedirs(output_dir_path)
        dir_name = output_dir_path
        tb_dirname = output_dir_path + '/tb_logdir'
    else:
        counter = 1
        dir_name = output_dir_path
        new_dir_name = dir_name
        while os.path.exists(new_dir_name):
            new_dir_name = dir_name + "_" + str(counter)
            counter += 1
        os.makedirs(new_dir_name)
        dir_name = new_dir_name
        tb_dirname = dir_name + "/tb_logdir"

    print("===>> Output folder = {}".format(dir_name))
    
    args.cuda = not args.no_cuda and torch.cuda.is_available()
    torch.manual_seed(args.seed)

    if args.cuda:
        torch.backends.cudnn.benchmark = True
        torch.cuda.manual_seed(args.seed)
    
    loaders = Cifar10Loaders()
    train_loader = loaders.train_loader()
    test_loader = loaders.test_loader()

    if args.netqat:
        base_model = cnn()
        model=cnn(qat_mode=True)

        ## Loading checkpoints here
        checkpoint = torch.load(args.load_ckpt)

        ## Making sure that the checkpoint was loaded from disk correctly
        base_model.load_state_dict(checkpoint['model_state_dict'], strict=True)
        base_model_sd = base_model.state_dict()
        ## Renaming the keys only to match the model with qat nodes. Only convs and BNs will be changed
        base_model_sd_new = map_ckpt_names(base_model_sd)

        ## Updating the values of keys for the qat model
        for k in base_model_sd_new.keys():
            try:
                model.state_dict()[k].copy_(base_model_sd_new[k])
                print("{} successfully loaded".format(k))
            except Exception:
                print("{} didn't load".format(k))

    else:
        model=cnn()

    ## Instantiate tensorboard logs
    writer = SummaryWriter(tb_dirname)
    images, labels = next(iter(train_loader))
    img_grid = torchvision.utils.make_grid(images)
    writer.add_image('cifar-10', img_grid)
    #writer.add_graph(model,images)

    if args.cuda:
        model = model.cuda()
        if args.parallel:
            model = nn.DataParallel(model, device_ids=range(torch.cuda.device_count()))

    criterion = nn.CrossEntropyLoss()
    optimizer = optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.wd)
    best_test_accuracy = 0
    print("===>> Training started")

    if args.netqat:
        # checkpoint is only defined when a checkpoint was loaded above
        optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
        epoch = checkpoint['epoch']
        loss = checkpoint['loss']
        print("===>>> Checkpoint loaded successfully from {} at epoch {}".format(args.load_ckpt, epoch))

    print(model)
    for epoch in range(args.start_epoch, args.start_epoch + args.num_epochs):
        running_loss = 0.0
        start = time.time()
        model.train()
        for i, data in enumerate(train_loader, 0):
            inputs, labels = data

            if args.cuda:
                inputs = inputs.cuda()
                labels = labels.cuda()

            optimizer.zero_grad()
            
            outputs = model(inputs)
            loss = criterion(outputs,labels)
            loss.backward()
            optimizer.step()

            running_loss += loss.item()
            if i % 1000 == 999:
                writer.add_scalar('train_loss', running_loss / 1000, epoch * len(train_loader) + i)
                writer.add_scalar('learning_rate', optimizer.param_groups[0]['lr'], epoch * len(train_loader) + i)
        
        if epoch > 0 and epoch % args.lrdt == 0:
            print("===>> decaying learning rate at epoch {}".format(epoch))
            for param_group in optimizer.param_groups:
                param_group['lr'] = param_group['lr'] * 0.94

        running_loss /= len(train_loader)
        end = time.time()
        test_accuracy = calculate_accuracy(model, test_loader)
        writer.add_scalar('test_accuracy', test_accuracy, epoch)

        ## Adding histograms after every epoch
        for tag, value in model.named_parameters():
            tag = tag.replace('.', '/')
            writer.add_histogram(tag, value.data.cpu().numpy(), epoch)

        print("Epoch: {0} | Loss: {1} | Test accuracy: {2}| Time Taken (sec): {3} ".format(epoch+1, np.around(running_loss,6), test_accuracy, np.around((end-start),4)))

        best_ckpt_filename = dir_name + "/ckpt_" + str(epoch) + '.pth'
        ## Save the best checkpoint
        if test_accuracy > best_test_accuracy:
            best_test_accuracy = test_accuracy
            torch.save({
                'epoch': epoch,
                'model_state_dict': model.state_dict(),
                'optimizer_state_dict': optimizer.state_dict(),
                'loss': running_loss,
                }, best_ckpt_filename)
    writer.close()
    print("Training finished")