def validation(model, val_loader, epoch, writer):
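    """Evaluate the model on the validation loader.

    Computes pixel accuracy and mean IoU for the part, half-body and
    full-body predictions, periodically logs image/label/prediction grids
    to TensorBoard, writes the final scalars, and returns (pixAcc, mIoU)
    of the part-level head.
    """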
    # set evaluate mode
    model.eval()

    total_correct, total_label = 0, 0
    total_correct_hb, total_label_hb = 0, 0
    total_correct_fb, total_label_fb = 0, 0
    hist = np.zeros((args.num_classes, args.num_classes))
    hist_hb = np.zeros((args.hbody_cls, args.hbody_cls))
    hist_fb = np.zeros((args.fbody_cls, args.fbody_cls))

    # Iterate over data.
    bar = Bar('Processing {}'.format('val'), max=len(val_loader))
    bar.check_tty = False
    for idx, batch in enumerate(val_loader):
        image, target, hlabel, flabel, _ = batch
        image, target, hlabel, flabel = image.cuda(), target.cuda(
        ), hlabel.cuda(), flabel.cuda()
        with torch.no_grad():
            h, w = target.size(1), target.size(2)
            outputs = model(image)
            outputs = gather(outputs, 0, dim=0)
            preds = F.interpolate(input=outputs[0][-1],
                                  size=(h, w),
                                  mode='bilinear',
                                  align_corners=True)
            preds_hb = F.interpolate(input=outputs[1][-1],
                                     size=(h, w),
                                     mode='bilinear',
                                     align_corners=True)
            preds_fb = F.interpolate(input=outputs[2][-1],
                                     size=(h, w),
                                     mode='bilinear',
                                     align_corners=True)
            if idx % 50 == 0:
                img_vis = inv_preprocess(image, num_images=args.save_num)
                label_vis = decode_predictions(target.int(),
                                               num_images=args.save_num,
                                               num_classes=args.num_classes)
                pred_vis = decode_predictions(torch.argmax(preds, dim=1),
                                              num_images=args.save_num,
                                              num_classes=args.num_classes)

                # visual grids
                img_grid = torchvision.utils.make_grid(
                    torch.from_numpy(img_vis.transpose(0, 3, 1, 2)))
                label_grid = torchvision.utils.make_grid(
                    torch.from_numpy(label_vis.transpose(0, 3, 1, 2)))
                pred_grid = torchvision.utils.make_grid(
                    torch.from_numpy(pred_vis.transpose(0, 3, 1, 2)))
                writer.add_image('val_images', img_grid,
                                 epoch * len(val_loader) + idx + 1)
                writer.add_image('val_labels', label_grid,
                                 epoch * len(val_loader) + idx + 1)
                writer.add_image('val_preds', pred_grid,
                                 epoch * len(val_loader) + idx + 1)

            # pixelAcc
            correct, labeled = batch_pix_accuracy(preds.data, target)
            correct_hb, labeled_hb = batch_pix_accuracy(preds_hb.data, hlabel)
            correct_fb, labeled_fb = batch_pix_accuracy(preds_fb.data, flabel)
            # mIoU
            hist += fast_hist(preds, target, args.num_classes)
            hist_hb += fast_hist(preds_hb, hlabel, args.hbody_cls)
            hist_fb += fast_hist(preds_fb, flabel, args.fbody_cls)

            total_correct += correct
            total_correct_hb += correct_hb
            total_correct_fb += correct_fb
            total_label += labeled
            total_label_hb += labeled_hb
            total_label_fb += labeled_fb
            pixAcc = 1.0 * total_correct / (np.spacing(1) + total_label)
            IoU = round(np.nanmean(per_class_iu(hist)) * 100, 2)
            pixAcc_hb = 1.0 * total_correct_hb / (np.spacing(1) +
                                                  total_label_hb)
            IoU_hb = round(np.nanmean(per_class_iu(hist_hb)) * 100, 2)
            pixAcc_fb = 1.0 * total_correct_fb / (np.spacing(1) +
                                                  total_label_fb)
            IoU_fb = round(np.nanmean(per_class_iu(hist_fb)) * 100, 2)
            # plot progress
            bar.suffix = '{} / {} | pixAcc: {pixAcc:.4f}, mIoU: {IoU:.4f} | ' \
                         'pixAcc_hb: {pixAcc_hb:.4f}, mIoU_hb: {IoU_hb:.4f} | ' \
                         'pixAcc_fb: {pixAcc_fb:.4f}, mIoU_fb: {IoU_fb:.4f}'.format(idx + 1, len(val_loader),
                                                                                    pixAcc=pixAcc, IoU=IoU,
                                                                                    pixAcc_hb=pixAcc_hb, IoU_hb=IoU_hb,
                                                                                    pixAcc_fb=pixAcc_fb, IoU_fb=IoU_fb)
            bar.next()

    print('\n per class iou part: {}'.format(per_class_iu(hist) * 100))
    print('per class iou hb: {}'.format(per_class_iu(hist_hb) * 100))
    print('per class iou fb: {}'.format(per_class_iu(hist_fb) * 100))

    mIoU = round(np.nanmean(per_class_iu(hist)) * 100, 2)
    mIoU_hb = round(np.nanmean(per_class_iu(hist_hb)) * 100, 2)
    mIoU_fb = round(np.nanmean(per_class_iu(hist_fb)) * 100, 2)

    writer.add_scalar('val_pixAcc', pixAcc, epoch)
    writer.add_scalar('val_mIoU', mIoU, epoch)
    writer.add_scalar('val_pixAcc_hb', pixAcc_hb, epoch)
    writer.add_scalar('val_mIoU_hb', mIoU_hb, epoch)
    writer.add_scalar('val_pixAcc_fb', pixAcc_fb, epoch)
    writer.add_scalar('val_mIoU_fb', mIoU_fb, epoch)
    bar.finish()

    return pixAcc, mIoU
def createDataSet(file):
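    """Build the affordance HDF5 datasets from a detection result file.

    The path of `file` is parsed to locate the descriptor CSV and the
    dense scene cloud. For every affordance with more than n_samples
    high-scoring points, voxels around the best points are sampled and
    rotated n_orientations times about Z; 1024 negative voxels are also
    sampled. Results are written to HDF5 files (uses the module-level
    n_samples, n_orientations, n_points and max_rad).
    """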
    path = os.path.abspath(file)
    pos = path.rfind('/')
    tokens = path[pos + 1:].split('_')
    descriptor_id = tokens[6]
    scene_name = tokens[2]
    scene_name = path[:pos] + '/' + scene_name + '_d.pcd'
    file_descriptor = path[:pos] + '/tmp' + descriptor_id + '.csv'
    labels = np.genfromtxt(file_descriptor,
                           dtype='str',
                           skip_header=1,
                           delimiter=',')
    print('Affordances in descriptor %d' % labels.shape[0])
    fileId = tokens[-1]
    tokens = fileId.split('.')
    fileId = tokens[0]
    print(fileId)
    res_data_file = path[:pos] + '/' + fileId + '_goodPointsX.pcd'
    res_points_file = path[:pos] + '/' + fileId + '_goodPoints.pcd'

    data = load_pcd_data(res_data_file, cols=None)
    #print(data.shape)
    points = load_pcd_data(res_points_file, cols=(0, 1, 2))
    real_c_data = load_pcd_data(res_points_file,
                                cols=(3, ),
                                dataType=np.uint32)
    #real_c_data=np.array(colors[:,-1],dtype=np.int32)
    red = np.array((real_c_data >> 16) & 0x0000ff,
                   dtype=np.uint8).reshape(-1, 1)
    green = np.array((real_c_data >> 8) & 0x0000ff,
                     dtype=np.uint8).reshape(-1, 1)
    blue = np.array((real_c_data) & 0x0000ff, dtype=np.uint8).reshape(-1, 1)

    real_c_data = np.concatenate((red, green, blue), axis=1)

    perPoint = np.sum(real_c_data, axis=1)
    bounds = np.cumsum(perPoint)
    #print(bounds)
    howMany = np.zeros((labels.shape[0], 1), dtype=np.int32)
    all_data = np.zeros((data.shape[0], 6))

    for i in range(all_data.shape[0]):
        point_id = np.nonzero(bounds > i)[0][0]
        all_data[i, :3] = points[point_id, :]
        all_data[i, 3:] = data[i, :3]

    for i in range(labels.shape[0]):
        success = np.nonzero(all_data[:, 3] == i)[0]
        success2 = np.nonzero(all_data[success, 2] > 0.3)[0]
        howMany[i] = success2.size

    ids_target = np.nonzero(howMany > n_samples)[0]
    print('Real found: %d' % ids_target.size)
    if n_orientations > 1:
        name = 'AffordancesDataset_augmented_names.txt'
    else:
        name = 'AffordancesDataset_names.txt'
    with open(name, "w") as text_file:
        for i in range(ids_target.shape[0]):
            text_file.write(
                "%d:%s-%s\n" %
                (i, labels[ids_target[i], 0], labels[ids_target[i], 2]))
    #
    #print(labels[ids_target,1:])

    all_points = np.zeros((ids_target.size, n_samples, 3))
    all_points_score = np.zeros((ids_target.size, n_samples))
    for i in range(ids_target.shape[0]):
        #get the 3D point for the response
        success = np.nonzero((all_data[:, 3] == ids_target[i])
                             & (all_data[:, 2] > 0.3))[0]
        sorted_ids = np.argsort(all_data[success, 5])
        print(
            'Sampling for %s %s in %d points(%f,%f)' %
            (labels[ids_target[i], 0], labels[ids_target[i], 2], success.size,
             np.max(all_data[success, 5]), np.min(all_data[success, 5])))
        sorted_ids = sorted_ids[::-1]
        for j in range(n_samples):
            all_points[i, j, :] = all_data[success[sorted_ids[j]], :3]
            all_points_score[i, j] = all_data[success[sorted_ids[j]], 5]
        #print('Min %f max %f'%(all_points_score[i,0],all_points_score[i,-1]))
    labels_d = np.arange(ids_target.size)
    print(
        'Sampled points maxZ %f minZ %f' % (np.max(all_points[:, :, 2].reshape(
            1, -1)), np.min(all_points[:, :, 2].reshape(1, -1))))

    #sys.exit()

    if n_orientations > 1:
        name = 'dataPointsAffordances_augmented.h5'
    else:
        name = 'dataPointsAffordances.h5'
    if os.path.exists(name):
        os.system('rm %s' % (name))
    save_h5(name, all_points, labels_d, 'float32', 'uint8')

    #get dense cloud
    dense_sceneCloud = pypcd.PointCloud.from_path(scene_name).pc_data
    pc_array = np.array([[x, y, z] for x, y, z in dense_sceneCloud])

    #generate pointclouds that were not detected to test against single example training
    good_points_file = path[:pos] + '/' + fileId + '_goodPointsIds.pcd'
    sampled_points_file = path[:pos] + '/' + fileId + '_samplePointsIds.pcd'
    sampled_ids = np.sort(
        load_pcd_data(sampled_points_file, cols=(0, ), dataType=np.int32))
    good_ids = np.sort(
        load_pcd_data(good_points_file, cols=(0, ), dataType=np.int32))
    non_affordance = np.setdiff1d(np.arange(sampled_ids.shape[0]), good_ids)
    sampled_points_file = path[:pos] + '/' + fileId + '_samplePoints.pcd'
    sampled_points = load_pcd_data(sampled_points_file, cols=(0, 1, 2))
    np.random.shuffle(non_affordance)
    print('Getting 1024 negative examples ')
    #shuffle negative examples ids
    bar = Bar('Processing', max=1024)
    negative_examples = np.zeros((1024, n_points, 3), dtype=np.float32)
    for i in range(1024):
        point = pc_array[non_affordance[i], ...]
        voxel = getVoxel(point, max_rad, pc_array)
        minP = np.min(voxel, 0)
        maxP = np.max(voxel, 0)
        dist = np.linalg.norm(maxP - minP, axis=0) / 2
        print('RAD %f rad %f estimation %f' %
              (dist, max_rad, max_rad * np.sqrt(3)))
        sample = sample_cloud(voxel, n_points)
        negative_examples[i, ...] = sample
        bar.next()
    bar.finish()
    negative_labels = 100 * np.ones((1024, 1), dtype=np.uint8)
    print('Got %d negative examples' % (negative_examples.shape[0]))
    print(negative_examples[0, 0, :])
    name = 'AffordancesDataset_negatives.h5'
    if os.path.exists(name):
        os.system('rm %s' % (name))
    save_h5(name, negative_examples, negative_labels, 'float32', 'uint8')
    #sys.exit()

    print('Sampling actual voxels from %s of %d points' %
          (scene_name, pc_array.shape[0]))
    dataSet_data = np.zeros(
        (all_points.shape[0] * all_points.shape[1] * n_orientations, n_points,
         3),
        dtype=np.float32)
    dataSet_labels = np.zeros(
        (all_points.shape[0] * all_points.shape[1] * n_orientations, 1),
        dtype=np.uint8)
    print(dataSet_data.shape)
    count = 0
    #data_type 0->centered
    data_type = 1
    #extract voxels and pointclouds for dataset
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    ax.hold(False)
    for aff in range(all_points.shape[0]):
        print('Training examples for %s %s' %
              (labels[ids_target[aff], 0], labels[ids_target[aff], 2]))
        bar = Bar('Processing', max=all_points.shape[1])
        for n_sample in range(all_points.shape[1]):
            point = all_points[aff, n_sample, :].reshape(3, -1)
            #print(point.shape)
            voxel = getVoxel(point, max_rad, pc_array)
            if voxel.shape[0] < n_points:
                # not enough points around this sample: keep the whole voxel
                sample = voxel
            else:
                sample = sample_cloud(voxel, n_points)
            if data_type == 0:
                centered_sample = sample - point
            else:
                centered_sample = sample
            #rotate this voxels n_orientations around Z (up)
            for j in range(n_orientations):
                rotated_voxel = rotate_point_cloud_by_angle(
                    np.expand_dims(centered_sample, axis=0),
                    j * 2 * np.pi / n_orientations).squeeze()
                dataSet_data[count, ...] = rotated_voxel
                dataSet_labels[count] = labels_d[aff]
                count += 1
            if n_sample == 0:
                ax.scatter(rotated_voxel[:, 0],
                           rotated_voxel[:, 1],
                           rotated_voxel[:, 2],
                           s=3)
                plt.pause(0.2)
                plt.draw()
            bar.next()
        bar.finish()
    if n_orientations > 1:
        name = 'AffordancesDataset_augmented.h5'
    else:
        name = 'AffordancesDataset.h5'
    if os.path.exists(name):
        os.system('rm %s' % (name))
    save_h5(name, dataSet_data, dataSet_labels, 'float32', 'uint8')
Example #3
from progress.bar import Bar
import csv
with open('./testCode/csv/raikan_ahan.csv', encoding="utf8") as csvfile:
  test = csv.reader(csvfile)
  data = list(test)
  row_count = len(data)
  bar = Bar('Processing', max=row_count)
  for row in data:
    # Do some work
    bar.next()
  bar.finish()

# import tqdm


# file_path = './testCode/csv/raikan_ahan.csv'
# with open(file_path) as file:
#     for line in tqdm(file, total=get_num_lines(file_path)):
# import csv
# import csv
# from progress.bar import IncrementalBar
# with open('./testCode/csv/raikan_ahan.csv', encoding="utf8") as csvfile:
#   test = csv.reader(csvfile)
#   data = list(test)
#   row_count = len(data)
# #   print(row_count)


#   max = 3000
#   bar = IncrementalBar(f'Word segment file ',max = max ,suffix='%(percent)d%% %(elapsed_td)s')
    def fit(self, train_domain, num_epochs, patience, optimizer, train_dir,
            dev_dir):
        """
        Trains the model.
        :param train_domain: the domain used for training
        :param num_epochs: the max number of epochs the model should be trained
        :param patience: the patience to use for early stopping
        :param optimizer: the optimizer that should be used
        :param train_dir: the directory containing the training files
        :param dev_dir: the directory containing the development files
        """
        print("Reading training data from %s..." % train_dir, flush=True)
        print("Tasks: %s" % self.task_names)
        train_X, train_Y, _, _, word2id, char2id, task2t2i = get_data(
            [train_domain], self.task_names, data_dir=train_dir, train=True)

        # get the development data of the same domain
        dev_X, dev_Y, org_X, org_Y, _, _, _ = get_data(
            [train_domain], self.task_names, word2id, char2id, task2t2i,
            data_dir=dev_dir, train=False)
        print('Length of training data:', len(train_X), flush=True)
        print('Length of validation data:', len(dev_X), flush=True)

        # store mappings of words and tags to indices
        self.set_indices(word2id, char2id, task2t2i)
        num_words = len(self.word2id)
        num_chars = len(self.char2id)

        print('Building the computation graph...', flush=True)
        self.predictors, self.char_rnn, self.wembeds, self.cembeds = \
            self.build_computation_graph(num_words, num_chars)

        if optimizer == SGD:
            trainer = dynet.SimpleSGDTrainer(self.model)
        elif optimizer == ADAM:
            trainer = dynet.AdamTrainer(self.model)
        else:
            raise ValueError('%s is not a valid optimizer.' % optimizer)

        train_data = list(zip(train_X, train_Y))

        num_iterations = 0
        num_epochs_no_improvement = 0
        best_dev_acc = 0

        print('Training model with %s for %d epochs and patience of %d.'
              % (optimizer, num_epochs, patience))
        for epoch in range(num_epochs):
            print('', flush=True)
            bar = Bar('Training epoch %d/%d...' % (epoch+1, num_epochs),
                      max=len(train_data), flush=True)

            # keep track of the # of updates, total loss, and total # of
            # predicted instances per task
            task2num_updates = {task: 0 for task in self.task_names}
            task2total_loss = {task: 0.0 for task in self.task_names}
            task2total_predicted = {task: 0.0 for task in self.task_names}
            total_loss = 0.0
            total_penalty = 0.0
            total_predicted = 0.0
            random.shuffle(train_data)

            # for every instance, we optimize the loss of the corresponding task
            for (word_indices, char_indices), task2label_id_seq in train_data:
                # get the concatenated word and char-based features for every
                # word in the sequence
                features = self.get_word_char_features(word_indices, char_indices)
                for task, y in task2label_id_seq.items():
                    placeholder_idx = self.task2tag2idx[task]['_'] if '_' in self.task2tag2idx[task] else -1
                    if task in [POS, CHUNK, NER, SRL]:
                        output, penalty = self.predict(features, task, train=True)
                        neg_logs = [pick_neg_log(pred, gold) for pred, gold in zip(output, y)]
                    elif task in [STUTT, SAARB, TSVET, VUAMC, STUTT_M, SAARB_M, TSVET_M, VUAMC_M]:
                        output, penalty = self.predict(features, task, train=True)
                        # get the index for the placeholder label; if there is no placeholder in the data, set to -1, so that every sample is regarded
                        neg_logs = [pick_neg_log(pred, gold) for pred, gold in zip(output, y) if gold != placeholder_idx]
                        # if the sentence does not contain any literal or metaphor samples, skip it; do not consider it for training
                        if not neg_logs:
                            continue
                    else:
                        raise NotImplementedError('Task %s has not been '
                                                  'implemented yet.' % task)

#                    labels = {v: k for k,v in self.task2tag2idx[task].items()}
                    loss = dynet.esum(neg_logs)
#                    loss = dynet.esum([pick_neg_log(pred, gold) for pred, gold
#                                       in zip(output, y)])
                    lv = loss.value()
                    # sum the loss and the subspace constraint penalty
                    combined_loss = loss + dynet.parameter(
                        self.constraint_weight_param, update=False) * penalty
                    total_loss += lv
                    total_penalty += penalty.value()
                    assert len(output) == len(y)
                    total_predicted += len([1 for gold in y if gold != placeholder_idx])
                    task2total_loss[task] += lv
                    task2total_predicted[task] += len([1 for gold in y if gold != placeholder_idx])
                    task2num_updates[task] += 1

                    # back-propagate through the combined loss
                    combined_loss.backward()
                    trainer.update()
                bar.next()
                num_iterations += 1

            print("\nEpoch %d. Total loss: %.3f. Total penalty: %.3f. Losses: "
                  % (epoch, total_loss / total_predicted,
                     total_penalty / total_predicted), end='', flush=True)
            for task in task2total_loss.keys():
                if task2total_predicted[task] == 0:
                    print('%s: %.3f/%.3f. ' % (task, task2total_loss[task],
                                          task2total_predicted[task]),
                          end='', flush=True)
                else:
                    print('%s: %.3f. ' % (task, task2total_loss[task] /
                                          task2total_predicted[task]),
                          end='', flush=True)
            print('', flush=True)

            # evaluate after every epoch
            dev_acc = self.evaluate(dev_X, dev_Y, org_X=None, mode='nope')

            if dev_acc > best_dev_acc:
                print('Main task %s dev acc %.4f is greater than best dev acc '
                      '%.4f...' % (self.main_task, dev_acc, best_dev_acc),
                      flush=True)
                best_dev_acc = dev_acc
                num_epochs_no_improvement = 0
                print('Saving model to directory %s...' % self.model_dir,
                      flush=True)
                self.save()
                self.evaluate(dev_X, dev_Y, org_X=org_X, mode='dev')
            else:
                print('Main task %s dev acc %.4f is lower than best dev acc '
                      '%.4f...' % (self.main_task, dev_acc, best_dev_acc),
                      flush=True)
                num_epochs_no_improvement += 1
            if num_epochs_no_improvement == patience:
                print('Early stopping...', flush=True)
                print('Loading the best performing model from %s...'
                      % self.model_dir, flush=True)
                self.model.load(self.model_file)
                break
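
# A minimal usage sketch for fit() (illustrative only: the class name,
# constructor arguments and directory layout below are assumptions, not
# taken from this code):
#
#   tagger = MultiTaskModel(task_names=[POS, STUTT], main_task=STUTT,
#                           model_dir='models/run1')
#   tagger.fit(train_domain='news', num_epochs=30, patience=3,
#              optimizer=SGD, train_dir='data/train', dev_dir='data/dev')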
def sampleFromFile(affordance,
                   list_of_files,
                   number_of_samples,
                   pointsPerCloud=4096):
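    """Sample `number_of_samples` voxel clouds for one affordance.

    The requested samples are spread randomly over `list_of_files`; for
    every selected file the *_newData.csv results and the matching scene
    cloud are loaded, and a voxel of `pointsPerCloud` points is extracted
    around each chosen point (resampled or padded as needed). Returns
    (dataPoints, dataClouds), or empty arrays if a scene cloud is missing
    or a file runs out of usable points.
    """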
    file_options = np.arange(len(list_of_files))
    files_to_sample = np.random.randint(len(list_of_files),
                                        size=(1, number_of_samples))
    repeated = np.bincount(files_to_sample[0, :], minlength=len(list_of_files))
    actually_sample_files = np.nonzero(repeated)[0]
    dataPoints = np.empty((number_of_samples, 6), dtype=np.float64)
    dataClouds = np.empty((number_of_samples, pointsPerCloud, 3),
                          dtype=np.float32)
    start_id = 0
    actually_sampled = 0
    outOfPoints = False
    bar = Bar('Sampling ', max=number_of_samples)
    for i in range(actually_sample_files.size):
        file = list_of_files[actually_sample_files[i]] + "_newData.csv"
        pos = file.rfind('/') + 1
        if "space/" in file:
            #Need to search for the exact file
            pos_id = list_of_files[actually_sample_files[i]].rfind('/') + 1
            target_file_id = list_of_files[actually_sample_files[i]][pos_id:]
            path_to_scene = (file[:pos_id] + 'All_affordances_*_' +
                             target_file_id + '.pcd')
            someFile = glob.glob(path_to_scene)
            tokens = someFile[0].split('_')
            cloud_file = list_of_files[
                actually_sample_files[i]][:pos_id] + tokens[2]
            if "real" in tokens[2]:
                cloud_file = cloud_file + ".pcd"
            else:
                cloud_file = cloud_file + "_d.pcd"
                #if "readingroom" in cloud_file:
                #print(list_of_files[actually_sample_files[i]])
                #print(cloud_file)
                #sys.exit()
        else:
            pos_id = list_of_files[actually_sample_files[i]].rfind('/') + 1
            target_file_id = list_of_files[actually_sample_files[i]][pos_id:]
            if "DATA" in file[:pos_id]:
                path_to_scene = file[:pos_id] + '*_clean.pcd'
                someFile = glob.glob(path_to_scene)
                cloud_file = someFile[0]
            else:
                path_to_scene = (file[:pos_id] + 'All_affordances_*_' +
                                 target_file_id + '.pcd')
                someFile = glob.glob(path_to_scene)
                tokens = someFile[0].split('_')
                cloud_file = list_of_files[
                    actually_sample_files[i]][:pos_id] + tokens[2] + '.pcd'
                #print(cloud_file)
                #sys.exit()
        sample_from_file = repeated[actually_sample_files[i]]
        data = np.genfromtxt(file, delimiter=",", dtype='float32')
        target_ids = np.nonzero(data[:, A_ID].astype(int) == affordance)[0]
        sorted_subset = np.argsort(data[target_ids, SCORE])
        sorted_subset = sorted_subset[::-1]
        j = 0
        k = 0
        complete_sample = False
        if not os.path.exists(cloud_file):
            print('No input cloud %s' % (cloud_file))
            return np.empty((0, 6)), np.empty((0, 0, 0))
        cloud, _ = load_pcd_data_binary(cloud_file)
        kdt = BallTree(cloud, leaf_size=5, metric='euclidean')
        while not complete_sample:
            # take points until the sample set is complete
            dataPoints[start_id + j, :] = data[target_ids[sorted_subset[k]], :]
            point = dataPoints[start_id + j, :3]
            voxel_ids = getVoxel(point, max_rad, kdt)
            voxel = cloud[voxel_ids, :]
            actual_voxel_size = voxel.shape[0]
            if actual_voxel_size < (pointsPerCloud / 4):
                #bad point, get a new one
                if k == 0:
                    print("\n File %s" % (cloud_file))
                outputText = "Voxel " + str(
                    voxel.shape[0]) + " " + str(k) + "/" + str(
                        sorted_subset.shape[0])
                print(outputText, end='\r')
                #print('\nFile: %s bad point %d/%d\r'%(someFile[0],k,sorted_subset.shape[0]))
                #print('bad point %d of %d Voxel: %d'%(k,sorted_subset.shape[0],voxel.shape[0]))
                k += 1
                if k >= sorted_subset.shape[0]:
                    outOfPoints = True
                    print('Exhausted File')
                    break
            else:
                if actual_voxel_size >= pointsPerCloud:
                    sample = sample_cloud(voxel, pointsPerCloud)
                else:
                    print('padding')
                    padding = point + np.zeros(
                        (pointsPerCloud - actual_voxel_size, 3),
                        dtype=np.float32)
                    sample = np.concatenate((padding, voxel), axis=0)
                #center cloud
                dataClouds[start_id + j, ...] = sample - point
                j += 1
                #print('\tVoxel size (%d,%d) SampleSize(%d,%d) start_id %d +j %d'%(voxel.shape[0],voxel.shape[1],sample.shape[0],sample.shape[1],start_id,j))
            if j == sample_from_file:
                complete_sample = True
        if not outOfPoints:
            start_id += sample_from_file
            actually_sampled += sample_from_file
            bar.next(sample_from_file)
        else:
            break
    bar.finish()
    if outOfPoints or actually_sampled != number_of_samples:
        return np.empty((0, 6)), np.empty((0, 0, 0))
    else:
        return dataPoints, dataClouds
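
# Illustrative call (the affordance id, file list and sample counts below
# are assumptions, not taken from this code):
#   points, clouds = sampleFromFile(affordance=3, list_of_files=training_files,
#                                   number_of_samples=512, pointsPerCloud=4096)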
# import matplotlib.pyplot as plt
# import seaborn as sns
from raschietto import Raschietto, Matcher
from progress.bar import Bar
import numpy as np

# def loadData():
#     with open('sri/data.pkl', 'rb') as handle:
#         teamData = pickle.load(handle)
#
#     return teamData

store = "Y"

if store == "Y":
    teams = load()
    bar = Bar('Cleaning Data', max=len(teams))
    for team in list(teams):  # teams.keys():
        bar.next()
        try:
            for i in teams[team]:
                if i > 239:
                    del teams[team]
                    break
        except:
            pass
        try:
            if np.isnan(teams[team][0]):
                del teams[team]
                continue
        except:
            pass
Example #7
def prefetch_test(opt):
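    """Run the detector over a dataset split using prefetched inputs.

    Optionally merges previously saved detections (opt.load_results),
    feeds pre-processed images through the detector, collects per-image
    results, optionally dumps them to JSON and calls the dataset's
    evaluation routine.
    """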
    if not opt.not_set_cuda_env:
        os.environ['CUDA_VISIBLE_DEVICES'] = opt.gpus_str
    Dataset = dataset_factory[opt.test_dataset]
    opt = opts().update_dataset_info_and_set_heads(opt, Dataset)
    print(opt)
    Logger(opt)

    split = 'val' if not opt.trainval else 'test'
    dataset = Dataset(opt, split)
    detector = Detector(opt)

    if opt.load_results != '':
        load_results = json.load(open(opt.load_results, 'r'))
        for img_id in load_results:
            for k in range(len(load_results[img_id])):
                if load_results[img_id][k][
                        'class'] - 1 in opt.ignore_loaded_cats:
                    load_results[img_id][k]['score'] = -1
    else:
        load_results = {}

    data_loader = torch.utils.data.DataLoader(PrefetchDataset(
        opt, dataset, detector.pre_process),
                                              batch_size=1,
                                              shuffle=False,
                                              num_workers=1,
                                              pin_memory=True)

    results = {}
    num_iters = len(data_loader) if opt.num_iters < 0 else opt.num_iters
    bar = Bar('{}'.format(opt.exp_id), max=num_iters)
    time_stats = ['tot', 'load', 'pre', 'net', 'dec', 'post', 'merge', 'track']
    avg_time_stats = {t: AverageMeter() for t in time_stats}
    if opt.use_loaded_results:
        for img_id in data_loader.dataset.images:
            results[img_id] = load_results['{}'.format(img_id)]
        num_iters = 0
    for ind, (img_id, pre_processed_images) in enumerate(data_loader):
        if ind >= num_iters:
            break
        if opt.tracking and ('is_first_frame' in pre_processed_images):
            if '{}'.format(int(img_id.numpy().astype(
                    np.int32)[0])) in load_results:
                pre_processed_images['meta']['pre_dets'] = \
                    load_results['{}'.format(int(img_id.numpy().astype(np.int32)[0]))]
            else:
                print()
                print('No pre_dets for',
                      int(img_id.numpy().astype(np.int32)[0]),
                      '. Use empty initialization.')
                pre_processed_images['meta']['pre_dets'] = []
            detector.reset_tracking()
            print('Start tracking video',
                  int(pre_processed_images['video_id']))
        if opt.public_det:
            if '{}'.format(int(img_id.numpy().astype(
                    np.int32)[0])) in load_results:
                pre_processed_images['meta']['cur_dets'] = \
                    load_results['{}'.format(int(img_id.numpy().astype(np.int32)[0]))]
            else:
                print('No cur_dets for',
                      int(img_id.numpy().astype(np.int32)[0]))
                pre_processed_images['meta']['cur_dets'] = []

        ret = detector.run(pre_processed_images)
        results[int(img_id.numpy().astype(np.int32)[0])] = ret['results']

        Bar.suffix = '[{0}/{1}]|Tot: {total:} |ETA: {eta:} '.format(
            ind, num_iters, total=bar.elapsed_td, eta=bar.eta_td)
        # for t in avg_time_stats:
        #     avg_time_stats[t].update(ret[t])
        #     Bar.suffix = Bar.suffix + '|{} {tm.val:.3f}s ({tm.avg:.3f}s) '.format(
        #         t, tm = avg_time_stats[t])
        if opt.print_iter > 0:
            if ind % opt.print_iter == 0:
                print('{}/{}| {}'.format(opt.task, opt.exp_id, Bar.suffix))
        else:
            bar.next()
    bar.finish()
    if opt.save_results:
        print(
            'saving results to',
            opt.save_dir + '/save_results_{}{}.json'.format(
                opt.test_dataset, opt.dataset_version))
        json.dump(
            _to_list(copy.deepcopy(results)),
            open(
                opt.save_dir + '/save_results_{}{}.json'.format(
                    opt.test_dataset, opt.dataset_version), 'w'))
    dataset.run_eval(results, opt.data_dir, opt.save_dir)
Example #8
def run(filename, split, level, dred, dbrown, debug):
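    """Slice an NDPI slide into split x split regions at the given level,
    extract red and brown stain masks for every region, save the
    intermediate images under output/ and return the accumulated red and
    brown pixel counts."""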

    # Create OpenSlide object
    ndpi = OpenSlide(filename)
    ndpi_width = ndpi.dimensions[0]
    ndpi_height = ndpi.dimensions[1]
    total_width = ndpi.level_dimensions[level][0]
    total_height = ndpi.level_dimensions[level][1]

    red_sum = 0.0
    brown_sum = 0.0
    surface_sum = 0.0

    startTime = time.time()

    if debug:
        print("filename: {}".format(filename))
        print("split: {}".format(split))
        print("level: {}".format(level))
        print("dred: {}".format(dred))
        print("dbrown: {}".format(dbrown))
        print("debug: {}".format(debug))

    if debug:
        print("================ START =================")
        print("LOAD {}".format(filename))
        print("width:".ljust(20) + str(ndpi_width))
        print("height:".ljust(20) + str(ndpi_height))
        print("level count:".ljust(20) + str(ndpi.level_count))
        print("split image {}x{} , level:{} , factor:{}".format(
            total_width, total_height, level, split))

    bar = Bar('Processing', max=split**2)
    for i in range(split):
        for j in range(split):
            x = i * ndpi_width // split
            y = j * ndpi_height // split
            w = total_width // split
            h = total_height // split

            if debug:
                print("\n>SLICE [{}][{}]".format(i, j))
                print("x:{:3} y:{:3} w:{:3}px h:{:3}px:".format(x, y, w, h))

            region = ndpi.read_region((x, y), level, (w, h))
            red = bgsa.get_red(region, brightness=-dred)
            brown = bgsa.get_brown(region, brightness=-dbrown)
            # surface = bgsa.get_surface(region)

            region.save("output/normal_slice{}{}.png".format(i, j))
            red.save("output/red_slice_{}{}.png".format(i, j))
            brown.save("output/brown_slice_{}{}.png".format(i, j))
            # surface.save("output/surface_slice_{}{}.png".format(i,j))

            red_sum += bgsa.get_white_pixels(red)
            brown_sum += bgsa.get_white_pixels(brown)
            # surface_sum+= bgsa.get_white_pixels(surface)

            if debug:
                bar.next()

            # print "white:{}% black{}%".format(results["white"], results["black"])

    if debug:
        bar.finish()
        print("Finished....in {:.2f} sec".format(time.time() - startTime))
        print("total red   :".ljust(20) + str(red_sum))
        print("total brown :".ljust(20) + str(brown_sum))

    return {"red": red_sum, "brown": brown_sum}
Example #9
    arg.add_argument("-d",
                     "--destination",
                     help="Specify the directory to output final image to.",
                     required=True)
    arg.add_argument("-n",
                     "--num_tiles",
                     help="Max number of tiles to solve.",
                     required=False)
    args = arg.parse_args()
    cm = cachemap(source=args.source, dest=args.destination)

    if os.path.isdir(args.source):  # if source is valid, proceed
        image_list = []
        max = len(os.listdir(
            args.source))  # number of images in dir (starting at 1)
        bar = Bar("[+] Reading in images from %s" % (args.source), max=max)
        for image in os.listdir(args.source):
            if image.endswith(".bmp"):
                # add image to array
                image_list.append(Image.open(os.path.join(args.source, image)))
                bar.next()
        bar.finish()
    else:
        sys.stderr.write(
            "Invalid -s/--source path %s. Use -h/--help for help%s" %
            (args.source, os.linesep))
        exit(-1)

#     for img in images:
#         if cm.read_bmp(img):
#             cm.genetic_algo()
#             cm.export_sol()
Example #10
def verify_batch_consumer_performance():
    """ Verify batch Consumer performance """

    conf = {'bootstrap.servers': bootstrap_servers,
            'group.id': uuid.uuid1(),
            'session.timeout.ms': 6000,
            'error_cb': error_cb,
            'default.topic.config': {
                'auto.offset.reset': 'earliest'
            }}

    c = confluent_kafka.Consumer(**conf)

    def my_on_assign(consumer, partitions):
        print('on_assign:', len(partitions), 'partitions:')
        for p in partitions:
            print(' %s [%d] @ %d' % (p.topic, p.partition, p.offset))
        consumer.assign(partitions)

    def my_on_revoke(consumer, partitions):
        print('on_revoke:', len(partitions), 'partitions:')
        for p in partitions:
            print(' %s [%d] @ %d' % (p.topic, p.partition, p.offset))
        consumer.unassign()

    c.subscribe([topic], on_assign=my_on_assign, on_revoke=my_on_revoke)

    max_msgcnt = 1000000
    bytecnt = 0
    msgcnt = 0
    batch_size = 1000

    print('Will now consume %d messages' % max_msgcnt)

    if with_progress:
        bar = Bar('Consuming', max=max_msgcnt,
                  suffix='%(index)d/%(max)d [%(eta_td)s]')
    else:
        bar = None

    while msgcnt < max_msgcnt:
        # Consume until we hit max_msgcnt

        msglist = c.consume(num_messages=batch_size, timeout=20.0)

        for msg in msglist:
            if msg.error():
                if msg.error().code() == confluent_kafka.KafkaError._PARTITION_EOF:
                    # Reached EOF for a partition, ignore.
                    continue
                else:
                    raise confluent_kafka.KafkaException(msg.error())

            bytecnt += len(msg)
            msgcnt += 1

            if bar is not None and (msgcnt % 10000) == 0:
                bar.next(n=10000)

            if msgcnt == 1:
                t_first_msg = time.time()

    if bar is not None:
        bar.finish()

    if msgcnt > 0:
        t_spent = time.time() - t_first_msg
        print('%d messages (%.2fMb) consumed in %.3fs: %d msgs/s, %.2f Mb/s' %
              (msgcnt, bytecnt / (1024*1024), t_spent, msgcnt / t_spent,
               (bytecnt / t_spent) / (1024*1024)))

    print('closing consumer')
    c.close()
Example #11
def verify_stats_cb():
    """ Verify stats_cb """

    def stats_cb(stats_json_str):
        global good_stats_cb_result
        stats_json = json.loads(stats_json_str)
        if topic in stats_json['topics']:
            app_offset = stats_json['topics'][topic]['partitions']['0']['app_offset']
            if app_offset > 0:
                print("# app_offset stats for topic %s partition 0: %d" %
                      (topic, app_offset))
                good_stats_cb_result = True

    conf = {'bootstrap.servers': bootstrap_servers,
            'group.id': uuid.uuid1(),
            'session.timeout.ms': 6000,
            'error_cb': error_cb,
            'stats_cb': stats_cb,
            'statistics.interval.ms': 200,
            'default.topic.config': {
                'auto.offset.reset': 'earliest'
            }}

    c = confluent_kafka.Consumer(**conf)
    c.subscribe([topic])

    max_msgcnt = 1000000
    bytecnt = 0
    msgcnt = 0

    print('Will now consume %d messages' % max_msgcnt)

    if with_progress:
        bar = Bar('Consuming', max=max_msgcnt,
                  suffix='%(index)d/%(max)d [%(eta_td)s]')
    else:
        bar = None

    while not good_stats_cb_result:
        # Consume until EOF or error

        msg = c.poll(timeout=20.0)
        if msg is None:
            raise Exception('Stalled at %d/%d message, no new messages for 20s' %
                            (msgcnt, max_msgcnt))

        if msg.error():
            if msg.error().code() == confluent_kafka.KafkaError._PARTITION_EOF:
                # Reached EOF for a partition, ignore.
                continue
            else:
                raise confluent_kafka.KafkaException(msg.error())

        bytecnt += len(msg)
        msgcnt += 1

        if bar is not None and (msgcnt % 10000) == 0:
            bar.next(n=10000)

        if msgcnt == 1:
            t_first_msg = time.time()
        if msgcnt >= max_msgcnt:
            break

    if bar is not None:
        bar.finish()

    if msgcnt > 0:
        t_spent = time.time() - t_first_msg
        print('%d messages (%.2fMb) consumed in %.3fs: %d msgs/s, %.2f Mb/s' %
              (msgcnt, bytecnt / (1024*1024), t_spent, msgcnt / t_spent,
               (bytecnt / t_spent) / (1024*1024)))

    print('closing consumer')
    c.close()
Example #12
def verify_producer_performance(with_dr_cb=True):
    """ Time how long it takes to produce and delivery X messages """
    conf = {'bootstrap.servers': bootstrap_servers,
            'api.version.request': api_version_request,
            'error_cb': error_cb}

    p = confluent_kafka.Producer(**conf)

    msgcnt = 1000000
    msgsize = 100
    msg_pattern = 'test.py performance'
    msg_payload = (msg_pattern * int(msgsize / len(msg_pattern)))[0:msgsize]

    dr = MyTestDr(silent=True)

    t_produce_start = time.time()
    msgs_produced = 0
    msgs_backpressure = 0
    print('# producing %d messages to topic %s' % (msgcnt, topic))

    if with_progress:
        bar = Bar('Producing', max=msgcnt)
    else:
        bar = None

    for i in range(0, msgcnt):
        while True:
            try:
                if with_dr_cb:
                    p.produce(topic, value=msg_payload, callback=dr.delivery)
                else:
                    p.produce(topic, value=msg_payload)
                break
            except BufferError:
                # Local queue is full (slow broker connection?)
                msgs_backpressure += 1
                if bar is not None and (msgs_backpressure % 1000) == 0:
                    bar.next(n=0)
                p.poll(100)
            continue

        if bar is not None and (msgs_produced % 5000) == 0:
            bar.next(n=5000)
        msgs_produced += 1
        p.poll(0)

    t_produce_spent = time.time() - t_produce_start

    bytecnt = msgs_produced * msgsize

    if bar is not None:
        bar.finish()

    print('# producing %d messages (%.2fMb) took %.3fs: %d msgs/s, %.2f Mb/s' %
          (msgs_produced, bytecnt / (1024*1024), t_produce_spent,
           msgs_produced / t_produce_spent,
           (bytecnt/t_produce_spent) / (1024*1024)))
    print('# %d temporary produce() failures due to backpressure (local queue full)' % msgs_backpressure)

    print('waiting for %d/%d deliveries' % (len(p), msgs_produced))
    # Wait for deliveries
    p.flush()
    t_delivery_spent = time.time() - t_produce_start

    print('# producing %d messages (%.2fMb) took %.3fs: %d msgs/s, %.2f Mb/s' %
          (msgs_produced, bytecnt / (1024*1024), t_produce_spent,
           msgs_produced / t_produce_spent,
           (bytecnt/t_produce_spent) / (1024*1024)))

    # Fake numbers if not using a dr_cb
    if not with_dr_cb:
        print('# not using dr_cb')
        dr.msgs_delivered = msgs_produced
        dr.bytes_delivered = bytecnt

    print('# delivering %d messages (%.2fMb) took %.3fs: %d msgs/s, %.2f Mb/s' %
          (dr.msgs_delivered, dr.bytes_delivered / (1024*1024), t_delivery_spent,
           dr.msgs_delivered / t_delivery_spent,
           (dr.bytes_delivered/t_delivery_spent) / (1024*1024)))
    print('# post-produce delivery wait took %.3fs' %
          (t_delivery_spent - t_produce_spent))
Example #13
    def Commentaries(self, codeArg, outputArg):
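        """Strip comments from every source file found under outputArg.

        Each file is rewritten in place: '#' comments and triple-quoted
        blocks that are not real string literals are removed, then the
        files are re-scanned to verify that no comment survived. Returns
        EXIT_SUCCESS or EXIT_FAILURE.
        """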
        countLineOutput = 0
        countLineInput = 0
        noCommentary = 0
        isCommentary = 0
        countRecursFiles = 0

        if codeArg == "python":
            detectFiles = "py"
            blockDir = "__pycache__"

            commentariesBeginLine = r"^\#.*"  # line starting with '#'
            quoteOfCommentariesMultipleLines = r"^\s*[\"|\']{3}$"  # """ or ''' opening a multi-line comment (not assigned to a variable)
            quoteInRegex = r"\={1}\s*r[\"|\']{1}"  # raw-string quote inside a regex assignment
            quoteOfEndCommentariesMultipleLines = r"^\s*[\"|\']{3}\)?\.?"  # """ or ''' closing a multi-line comment, possibly followed by ')' or '.' (e.g. .format())
            quoteOfCommentariesOneLine = r"[\"|\']{3}.*[\"|\']{3}$"  # single-line """ comment """ (not assigned to a variable)
            quoteIntoVariable = r".*\={1}\s*\w*\.?\w*[\(|\.]{1}[\"|\']{3}|.*\={1}\s*[\"|\']{3}"  # """ or ''' assigned to a variable (a real string, not a comment)
            commentariesAfterLine = r"\s*\#[^\"|^\'|^\.|^\?|^\*|^\!|^\]|^\[|^\\|^\)|^\(|^\{|^\}].*"  # '#' after code on the same line

        recursFiles = [
            f for f in glob.glob("{0}{1}**{1}*.{2}".format(
                outputArg, self.utils.Platform(), detectFiles),
                                 recursive=True)
        ]

        # -- Remove commentaries and Count commentaries will be removed -- #
        for number in recursFiles:
            countRecursFiles += 1

        print("\n[+] Running remove commentaries in {0} file(s)...\n".format(
            countRecursFiles))

        with Bar(PROGRESS_COLOUR + 'Processing', max=countRecursFiles) as bar:
            for file in recursFiles:
                if blockDir in file:
                    continue
                else:
                    # -- Remove commentaries -- #
                    with fileinput.input(file, inplace=True) as inputFile:
                        for eachLine in inputFile:
                            searchCommentariesAfterLine = re.search(
                                commentariesAfterLine, eachLine)
                            searchCommentariesBeginLine = re.search(
                                commentariesBeginLine, eachLine)
                            if codeArg == "python":
                                if "coding" in eachLine or "#!" in eachLine:
                                    print(eachLine)
                                    continue

                                if re.match(quoteInRegex, eachLine):
                                    continue
                                elif re.match(quoteIntoVariable, eachLine):
                                    noCommentary += 1
                                elif re.match(
                                        quoteOfCommentariesMultipleLines,
                                        eachLine
                                ) or re.match(
                                        quoteOfEndCommentariesMultipleLines,
                                        eachLine):
                                    isCommentary += 1
                                else:
                                    pass

                                if re.match(quoteOfCommentariesOneLine,
                                            eachLine):
                                    countLineInput += 1
                                    isCommentary = 0
                                    continue
                                elif isCommentary == 1 and noCommentary == 0:
                                    countLineInput += 1
                                    continue
                                elif isCommentary == 0 and noCommentary == 1:
                                    print(eachLine)
                                    continue
                                elif isCommentary == 2:
                                    countLineInput += 1
                                    isCommentary = 0
                                    continue
                                elif isCommentary == 1 and noCommentary == 1:
                                    isCommentary = 0
                                    noCommentary = 0
                                    print(eachLine)
                                    continue
                                else:
                                    pass

                            if searchCommentariesBeginLine is not None:
                                countLineInput += 1
                                eachLine = eachLine.replace(
                                    searchCommentariesBeginLine.group(0), "")
                                print(eachLine)
                            elif searchCommentariesAfterLine is not None:
                                eachLine = eachLine.replace(
                                    searchCommentariesAfterLine.group(0), "")
                                countLineInput += 1
                                print(eachLine)
                            else:
                                print(eachLine)
                bar.next(1)
            bar.finish()

        # -- Initialize vars -- #
        isCommentary = 0
        noCommentary = 0

        # -- Check if all commentaries are removed -- #
        for file in recursFiles:
            countLineOutput = 0
            if blockDir in file:
                continue
            else:
                with open(file, "r") as readFile:
                    countLineOutput = 0
                    readF = readFile.readlines()
                    for eachLine in readF:
                        searchCommentariesAfterLine = re.search(
                            commentariesAfterLine, eachLine)
                        searchCommentariesBeginLine = re.search(
                            commentariesBeginLine, eachLine)
                        if codeArg == "python":
                            if "coding" in eachLine or "#!" in eachLine:
                                continue

                            if re.match(quoteInRegex, eachLine):
                                continue
                            elif re.match(quoteIntoVariable, eachLine):
                                noCommentary += 1
                            elif re.match(
                                    quoteOfCommentariesMultipleLines,
                                    eachLine) or re.match(
                                        quoteOfEndCommentariesMultipleLines,
                                        eachLine):
                                isCommentary += 1
                            else:
                                pass

                            if re.match(quoteOfCommentariesOneLine, eachLine):
                                isCommentary = 0
                                countLineOutput += 1
                                continue
                            elif isCommentary == 1 and noCommentary == 0:
                                countLineOutput += 1
                                continue
                            elif isCommentary == 0 and noCommentary == 1:
                                continue
                            elif isCommentary == 2:
                                isCommentary = 0
                                countLineOutput += 1
                                continue
                            elif isCommentary == 1 and noCommentary == 1:
                                isCommentary = 0
                                noCommentary = 0
                                continue
                            else:
                                pass

                        if searchCommentariesBeginLine is not None:
                            countLineOutput += 1
                        elif searchCommentariesAfterLine is not None:
                            countLineOutput += 1
                        else:
                            pass

        if (Remove.Backslashes(self, codeArg, outputArg) == 0):
            if countLineOutput == 0:
                print("\n-> {0} lines of commentaries removed\n".format(
                    countLineInput))
                return EXIT_SUCCESS
            else:
                return EXIT_FAILURE
        else:
            return EXIT_FAILURE
Example #14
    def PrintFunctions(self, codeArg, outputArg):
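        """Remove print calls from every source file found under outputArg.

        Handles single-line and multi-line print()/print statements,
        rewrites the files in place, then re-scans them to verify that no
        print call remains. Returns EXIT_SUCCESS or EXIT_FAILURE.
        """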
        countPrintLine = 0
        countCheckPrintLine = 0
        countRecursFiles = 0
        checkPrintPy3MultipleLines = 0
        checkPrintPy2MultipleLines = 0

        if codeArg == "python":
            detectFiles = "py"
            blockDir = "__pycache__"

            detectPrint = r"\s*print"
            detectPythonPrint2 = r"\s*print\s*[\"|\']{1}"
            detectPythonPrint3 = r"\s*print\s*\({1}"
            detectPythonPrintMultipleLines = r"^\s+[\"\']{1}\s*\w+|^[\"\']{1}\s*\w+"

        recursFiles = [
            f for f in glob.glob("{0}{1}**{1}*.{2}".format(
                outputArg, self.utils.Platform(), detectFiles),
                                 recursive=True)
        ]

        for number in recursFiles:
            countRecursFiles += 1

        print("\n[+] Running remove print function in {0} file(s)...\n".format(
            countRecursFiles))

        with Bar(PROGRESS_COLOUR + 'Processing', max=countRecursFiles) as bar:
            for file in recursFiles:
                if blockDir in file:
                    continue
                else:
                    # -- Remove all print functions -- #
                    with fileinput.input(file, inplace=True) as inputFile:
                        for eachLine in inputFile:
                            if re.match(detectPrint, eachLine):
                                countPrintLine += 1
                                # -- If a Python 3 print() spans multiple lines -- #
                                if re.match(detectPythonPrint3, eachLine):
                                    if "(" in eachLine and not ")" in eachLine:
                                        checkPrintPy3MultipleLines += 1
                                        continue
                                    else:
                                        continue
                                # -- If a Python 2 print spans multiple lines -- #
                                elif re.match(detectPythonPrint2, eachLine):
                                    checkPrintPy2MultipleLines += 1
                                    continue
                            else:
                                if checkPrintPy3MultipleLines == 1:
                                    if ")" in eachLine and not "(" in eachLine:
                                        checkPrintPy3MultipleLines = 0
                                        continue
                                    else:
                                        continue
                                elif checkPrintPy2MultipleLines > 0:
                                    if re.match(detectPythonPrintMultipleLines,
                                                eachLine):
                                        checkPrintPy2MultipleLines += 1
                                        continue
                                    else:
                                        checkPrintPy2MultipleLines = 0
                                        print(eachLine)
                                        continue
                                else:
                                    print(eachLine)
                bar.next(1)
            bar.finish()

        # -- Check if all print functions are removed -- #
        for file in recursFiles:
            if blockDir in file:
                continue
            else:
                with open(file, "r") as readFile:
                    readF = readFile.readlines()
                    for eachLine in readF:
                        if re.match(detectPrint, eachLine):
                            countCheckPrintLine += 1

        if (Remove.Backslashes(self, codeArg, outputArg) == 0):
            if countCheckPrintLine == 0:
                print("\n-> {0} print functions removed\n".format(
                    countPrintLine))
                return EXIT_SUCCESS
            else:
                return EXIT_FAILURE
        else:
            return EXIT_FAILURE
Example #15
    def run_epoch(self, phase, epoch, data_loader):
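        """Run one training or evaluation epoch over data_loader.

        Moves every batch to the target device, computes the loss, steps
        the optimizer in the 'train' phase, keeps running averages of the
        loss statistics in a progress bar, and returns (averaged stats,
        results).
        """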
        model_with_loss = self.model_with_loss
        if phase == 'train':
            model_with_loss.train()
        else:
            if len(self.opt.gpus) > 1:
                model_with_loss = self.model_with_loss.module
            model_with_loss.eval()
            torch.cuda.empty_cache()

        opt = self.opt
        results = {}
        data_time, batch_time = AverageMeter(), AverageMeter()
        avg_loss_stats = {l: AverageMeter() for l in self.loss_stats}
        num_iters = len(data_loader) if opt.num_iters < 0 else opt.num_iters
        bar = Bar('{}/{}'.format(opt.task, opt.exp_id), max=num_iters)
        end = time.time()
        for iter_id, batch in enumerate(data_loader):
            if iter_id >= num_iters:
                break
            data_time.update(time.time() - end)

            for k in batch:
                if k != 'meta':
                    batch[k] = batch[k].to(device=opt.device,
                                           non_blocking=True)
            output, loss, loss_stats = model_with_loss(batch)
            loss = loss.mean()
            if phase == 'train':
                self.optimizer.zero_grad()
                loss.backward()
                self.optimizer.step()
            batch_time.update(time.time() - end)
            end = time.time()

            Bar.suffix = '{phase}: [{0}][{1}/{2}]|Tot: {total:} |ETA: {eta:} '.format(
                epoch,
                iter_id,
                num_iters,
                phase=phase,
                total=bar.elapsed_td,
                eta=bar.eta_td)
            for l in avg_loss_stats:
                avg_loss_stats[l].update(loss_stats[l].mean().item(),
                                         batch['input'].size(0))
                Bar.suffix = Bar.suffix + '|{} {:.4f} '.format(
                    l, avg_loss_stats[l].avg)
            if not opt.hide_data_time:
                Bar.suffix = Bar.suffix + '|Data {dt.val:.3f}s({dt.avg:.3f}s) ' \
                  '|Net {bt.avg:.3f}s'.format(dt=data_time, bt=batch_time)
            if opt.print_iter > 0:
                if iter_id % opt.print_iter == 0:
                    print('{}/{}| {}'.format(opt.task, opt.exp_id, Bar.suffix))
            else:
                bar.next()

            if opt.test:
                self.save_result(output, batch, results)
            del output, loss, loss_stats

        bar.finish()
        ret = {k: v.avg for k, v in avg_loss_stats.items()}
        ret['time'] = bar.elapsed_td.total_seconds() / 60.
        return ret, results
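The progress reporting above leans on progress.bar.Bar: Bar.suffix is rebuilt every iteration and rendered by bar.next(), with elapsed_td / eta_td supplying the timing fields. A minimal self-contained sketch of that pattern (the dummy loop and names are illustrative, not part of the original trainer):

import time

from progress.bar import Bar

num_iters = 50
bar = Bar('demo_task/exp_0', max=num_iters)
for iter_id in range(num_iters):
    time.sleep(0.02)  # stand-in for one training step
    # rebuild the suffix each iteration, as run_epoch does
    Bar.suffix = '[{0}/{1}] |Tot: {total:} |ETA: {eta:} '.format(
        iter_id, num_iters, total=bar.elapsed_td, eta=bar.eta_td)
    bar.next()
bar.finish()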
Example #16
def main():
    capture = cv2.VideoCapture('input.mp4')
    background_subtractor = cv2.createBackgroundSubtractorMOG2()
    length = int(capture.get(cv2.CAP_PROP_FRAME_COUNT))

    bar = Bar('Processing Frame', max=length)

    first_iteration_indicator = 1
    for i in range(0, length):
        ret, frame = capture.read()

        #If first frame
        if first_iteration_indicator == 1:

            first_frame = copy.deepcopy(frame)
            height, width = frame.shape[:2]
            accum_image = np.zeros((height, width), np.uint8)
            first_iteration_indicator = 0

        else:
            filter = background_subtractor.apply(frame)
            cv2.imwrite('./frame.jpg', frame)
            cv2.imwrite('./diff-bkgnd-frame.jpg', filter)

            threshold = 2
            maxValue = 2
            ret, th1 = cv2.threshold(filter, threshold, maxValue,
                                     cv2.THRESH_BINARY)

            #add to the accumulated image
            accum_image = cv2.add(accum_image, th1)
            cv2.imwrite('./mask.jpg', accum_image)

            color_image_video = cv2.applyColorMap(accum_image,
                                                  cv2.COLORMAP_SUMMER)

            video_frame = cv2.addWeighted(frame, 0.7, color_image_video, 0.7,
                                          0)

            name = "./frames/frame%d.jpg" % i
            print(name)
            cv2.imwrite(name, video_frame)

            if cv2.waitKey(1) & 0xFF == ord('q'):
                break

        bar.next()

    bar.finish()

    make_video('./frames/', './output.avi')

    color_image = cv2.applyColorMap(accum_image, cv2.COLORMAP_HOT)
    result_overlay = cv2.addWeighted(first_frame, 0.7, color_image, 0.7, 0)

    #save the final heatmap
    cv2.imwrite('diff-overlay.jpg', result_overlay)

    #cleanup
    capture.release()
    cv2.destroyAllWindows()
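make_video is called above but not defined in this snippet; a plausible sketch, assuming it just stitches the saved ./frames/frame<i>.jpg files into an AVI with cv2.VideoWriter (the function body, frame ordering, and codec are assumptions):

import glob
import os

import cv2

def make_video(frames_dir, out_path, fps=30.0):
    # hypothetical helper: gather frames in the order they were written
    frame_paths = sorted(
        glob.glob(os.path.join(frames_dir, 'frame*.jpg')),
        key=lambda p: int(os.path.splitext(os.path.basename(p))[0].replace('frame', '')))
    if not frame_paths:
        return
    height, width = cv2.imread(frame_paths[0]).shape[:2]
    writer = cv2.VideoWriter(out_path, cv2.VideoWriter_fourcc(*'XVID'), fps,
                             (width, height))
    for path in frame_paths:
        writer.write(cv2.imread(path))
    writer.release()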
def extractAliases(repo: git.Repo, aliasPath: str, repoShortname: str,
                   token: str, maxDistance: float):
    commits = list(repo.iter_commits())

    # get all distinct author emails
    emails = set(commit.author.email.lower()
                 for commit in Bar('Processing').iter(commits))

    # get a commit per email
    shasByEmail = {}
    for email in Bar('Processing').iter(emails):
        commit = next(commit for commit in commits
                      if commit.author.email.lower() == email)

        shasByEmail[email] = commit.hexsha

    # query github for author logins by their commits
    loginsByEmail = dict()
    emailsWithoutLogins = []

    for email in Bar('Processing').iter(shasByEmail):
        sha = shasByEmail[email]
        url = 'https://api.github.com/repos/{}/commits/{}'.format(
            repoShortname, sha)
        request = requests.get(url,
                               headers={'Authorization': 'token ' + token})
        commit = request.json()

        if 'author' not in commit:
            continue

        author = commit['author']

        if author is not None:
            loginsByEmail[email] = author['login']
        else:
            emailsWithoutLogins.append(email)

    # build initial alias collection from logins
    aliases = {}
    usedAsValues = {}

    for email in loginsByEmail:
        login = loginsByEmail[email]
        aliasEmails = aliases.setdefault(login, [])
        aliasEmails.append(email)
        usedAsValues[email] = login

    for authorA in Bar('Processing').iter(emailsWithoutLogins):
        quickMatched = False

        # go through used values
        for key in usedAsValues:
            if authorA == key:
                quickMatched = True
                continue

            if (areSimilar(authorA, key, maxDistance)):
                alias = usedAsValues[key]
                aliases[alias].append(authorA)
                usedAsValues[authorA] = alias
                quickMatched = True
                break

        if quickMatched:
            continue

        # go through already extracted keys
        for key in aliases:
            if authorA == key:
                quickMatched = True
                continue

            if (areSimilar(authorA, key, maxDistance)):
                aliases[key].append(authorA)
                usedAsValues[authorA] = key
                quickMatched = True
                break

        if quickMatched:
            continue

        # go through all authors
        for authorB in emailsWithoutLogins:
            if authorA == authorB:
                continue

            if (areSimilar(authorA, authorB, maxDistance)):
                aliasedAuthor = aliases.setdefault(authorA, [])
                aliasedAuthor.append(authorB)
                usedAsValues[authorB] = authorA
                break

    # output to yaml
    with open(aliasPath, 'a', newline='') as f:
        yaml.dump(aliases, f)
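A hypothetical invocation of extractAliases; the clone path, repository shortname, and token below are placeholders, and maxDistance is the similarity threshold forwarded to areSimilar:

import git

repo = git.Repo('/path/to/local/clone')  # placeholder path to a local clone
extractAliases(repo,
               aliasPath='aliases.yml',
               repoShortname='owner/repo',  # placeholder GitHub shortname
               token='<github-personal-access-token>',
               maxDistance=0.2)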
Example #18
import time

from progress.bar import Bar  #pip install progress

bar = Bar('Processing',
          max=20,
          suffix='%(index)d/%(max)d - %(percent).1f%% - %(eta)ds')
for i in range(20):
    time.sleep(.05)
    bar.next()
bar.finish()
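For comparison, the same loop can be written with Bar.iter (used elsewhere in this collection), which advances and finishes the bar automatically:

import time

from progress.bar import Bar

for _ in Bar('Processing', max=20).iter(range(20)):
    time.sleep(.05)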
Example #19
import random
import time

from progress.bar import Bar


def magic_eight_ball():
    responses = [
        "There is never enough time in the morning. Try to combine brushing your teeth with your breakfast.",
        "A sticking plaster can heal any wound. You just have to believe.",
        "Floss. It's more important than you would think.",
        "You should probaby drink more water.",
        "You should consider buying a plunger before you need a plunger",
        "You know what you should probably earn more than you show, speak less than you know.",
        "Hahahahaha",
        "Once a week, take a bath in Epsom Salts, and if you can, add half cup of baking soda and some essential oil such as lavender.",
        "When exercising, count backwards. For example, if you are carrying out 20 sit ups, don’t count from 1 to 20, start at 20 and count backwards as you do them.",
        "Start listening to your gut instinct. It’s always right",
        "Never give anyone more than 2 chances.",
        "Wear sunscreen, even if you think you don't need it",
        "If you can do something in less than 5 minutes. Do it now.",
        "Always strive to stand and sit with good posture.", "Just have fun",
        "To be Idle is to be foolish",
        "You might want to run, but you should stay and fight.",
        "Face the truth with dignity", "Travel is in your future",
        "Don't wait for success to come - go find it!"
    ]
    question = (input(
        "Hi, enter your question\n or..\n Enter F to crack your fortune cookie \nEnter Q to quit game "
    )).upper()
    if question == "Q":
        return "Come back again soon"
    elif question == "F":
        bar = Bar('Processing', max=20, suffix='%(percent)d%%')
        for i in range(20):
            time.sleep(.15)
            bar.next()
        bar.finish()
        print(random.choice(responses))
        Continue = (input(
            "Play again?\n Enter 'Yes' to continue  or...\n 'No' to exit game "
        )).upper()
        if Continue == "YES":
            magic_eight_ball()
        else:
            return "come back again"
    elif len(question) < 10:
        bar = Bar('Processing', max=20, suffix='%(percent)d%%')
        for i in range(20):
            time.sleep(.15)
            bar.next()
        bar.finish()
        print("Invalid input")
        Continue = (input(
            "Play again?\n Enter 'Yes' to continue  or...\n 'No' to exit game "
        )).upper()
        if Continue == "YES":
            magic_eight_ball()
        else:
            return "come back again"
    else:
        bar = Bar('Processing', max=20, suffix='%(percent)d%%')
        for i in range(20):
            time.sleep(.15)
            bar.next()
        bar.finish()
        print(random.choice(responses))
        Continue = (input(
            "Play again?\n Enter 'Yes' to continue  or...\n 'No' to exit game "
        )).upper()
        if Continue == "YES":
            magic_eight_ball()
        else:
            return "come back again"
def trunco_check(path_tranco_list, path_names):
    """
    Args:
        path_tranco_list: top1m from tranco
        path_names = list of dom names
    """
    #path tranco list
    path_tranco = path_tranco_list
    #path lista nomi dom campus
    path_lnd_campus = path_names

    # extract domain names from the tranco list
    # (rows are assumed to be "rank,domain"; strip the newline so the
    #  membership check against the campus names below can match)
    tranco_dom = []
    with open(path_tranco) as tranco_csv:
        for line in tranco_csv:
            line = line.strip('\n')
            tranco_dom.append(line.split(',')[-1] if ',' in line else line)

    campus_dom = []
    # extract domains from the campus list, removing a leading 'www.', 'www8.'
    # or 'www2.' from the domain names
    with open(path_lnd_campus) as campus_txt:
        lines = campus_txt.readlines()
        for line in lines:
            line = line.strip('\n')
            if 'www8.' in line:
                line = line.replace('www8.', '')
                campus_dom.append(line)
            elif 'www2.' in line:
                line = line.replace('www2.', '')
                campus_dom.append(line)
            elif 'www.' in line:
                line = line.replace('www.', '')
                campus_dom.append(line)
            else:
                print('[debug] probably no new case')
                campus_dom.append(line)

    if os.path.exists('tranco_inter_campus.csv'):
        print('Removing old csv')
        os.remove('tranco_inter_campus.csv')
    else:
        print('No old csv exists')

    header = ['domain', 'tranco']
    print('CSV Creation')

    bar_csv = Bar('Csv creation', max=len(campus_dom), fill='~')
    with open('tranco_inter_campus.csv', mode='a') as csv_out:
        writer = csv.writer(csv_out)
        writer.writerow(header)
        for x in campus_dom:
            data = []
            if x in tranco_dom:
                data.append(str(x))
                data.append('1')
                writer.writerow(data)
            else:
                data.append(str(x))
                data.append('0')
                writer.writerow(data)
            bar_csv.next()
    bar_csv.finish()
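A hypothetical call; the file names below are placeholders for the Tranco top-1m CSV and a plain-text file with one domain per line:

trunco_check('top-1m.csv', 'campus_domains.txt')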
    def run_epoch(self, phase, epoch, data_loader, rank):
        model_with_loss = self.model_with_loss
        if phase == 'train':
            model_with_loss.train()
        else:
            model_with_loss.eval()
            torch.cuda.empty_cache()

        results = {}
        data_time, batch_time = AverageMeter(), AverageMeter()
        avg_loss_stats = {
            l: AverageMeter()
            for l in self.loss_stats if l in ('tot', 'hm', 'wh', 'tracking')
        }
        num_iters = len(
            data_loader
        ) if self.args.num_iters[phase] < 0 else self.args.num_iters[phase]
        bar = Bar('{}'.format("tracking"), max=num_iters)
        end = time.time()
        for iter_id, batch in enumerate(data_loader):
            if iter_id >= num_iters:
                break
            data_time.update(time.time() - end)

            for k in batch:
                if k in ('fpath', 'prev_fpath'):
                    continue
                if type(batch[k]) != list:
                    batch[k] = batch[k].to(self.args.device, non_blocking=True)
                else:
                    for i in range(len(batch[k])):
                        batch[k][i] = batch[k][i].to(self.args.device,
                                                     non_blocking=True)

            output, loss, loss_stats = model_with_loss(batch)
            loss = loss.mean()
            if phase == 'train':
                self.optimizer.zero_grad()
                loss.backward()
                torch.nn.utils.clip_grad_norm_(model_with_loss.parameters(),
                                               self.args.clip_value)
                self.optimizer.step()

            batch_time.update(time.time() - end)
            end = time.time()

            Bar.suffix = '{phase}: [{0}][{1}/{2}]| '.format(epoch,
                                                            iter_id,
                                                            num_iters,
                                                            phase=phase)
            for l in avg_loss_stats:
                avg_loss_stats[l].update(loss_stats[l].mean().item(),
                                         batch['image'].size(0))
                Bar.suffix = Bar.suffix + '|{} {:.4f} '.format(
                    l, avg_loss_stats[l].avg)
            Bar.suffix = Bar.suffix + '|Data {dt.val:.3f}s({dt.avg:.3f}s) ' \
                                      '|Net {bt.avg:.3f}s'.format(dt=data_time, bt=batch_time)

            if rank == 0 and phase == 'val' and self.args.write_mota_metrics and epoch in self.args.save_point:
                curr_name = None
                tracker = None
                for i in range(self.args.batch_size):
                    try:
                        fpath = batch['fpath'][i]
                    except IndexError:
                        break
                    fpath = fpath.split('.')[0].split('/')[-1]

                    name, num = fpath.split("_frame_")
                    num = int(num)
                    if num % self.args.val_select_frame != 0:
                        continue

                    if name != curr_name:
                        curr_name = name
                        tracker = Tracker(self.args)

                    out = [x[i][None] for x in output]
                    res = out
                    dets = generic_decode(
                        {k: res[t]
                         for (t, k) in enumerate(self.args.heads)},
                        self.args.max_objs, self.args)
                    for k in dets:
                        dets[k] = dets[k].detach().cpu().numpy()

                    if not tracker.init and len(dets) > 0:
                        tracker.init_track(dets)
                    elif len(dets) > 0:
                        tracker.step(dets)

                    with open(os.path.join(self.args.res_dir, fpath + '.txt'),
                              "w") as f:
                        for track in tracker.tracks:
                            x1, y1, x2, y2 = track['bbox']
                            f.write("{} {} {} {} {} {}\n".format(
                                track['score'], track['tracking_id'], x1, y1,
                                x2, y2))
            if rank == 0 and self.args.print_iter > 0:  # If not using progress bar
                if iter_id % self.args.print_iter == 0:
                    print('{}| {}'.format("tracking", Bar.suffix))
            else:
                bar.next()
            del output, loss, loss_stats

        if rank == 0 and phase == 'val' and self.args.write_mota_metrics and epoch in self.args.save_point:
            self.compute_map(epoch)

        bar.finish()
        ret = {k: v.avg for k, v in avg_loss_stats.items()}
        ret['time'] = bar.elapsed_td.total_seconds() / 60.
        return ret, results
Example #22
def dataGeneration(data_path):

    patient_folders = [
        i for i in os.listdir(data_path)
        if (not i.startswith('.') and i.startswith('patient'))
    ]

    # initialize dataset
    dataset = pd.DataFrame(columns=['label', 'record'])

    Bar.check_tty = False
    bar = Bar('Processing',
              max=len(patient_folders),
              fill='#',
              suffix='%(percent)d%%')

    # a loop for each patient
    for patient_name in patient_folders:
        detail_path = data_path + patient_name + '/'
        record_files = [
            i.split('.')[0] for i in os.listdir(detail_path)
            if i.endswith('.hea')
        ]

        # a loop for each record
        for record_name in record_files:

            # load record
            signal, info = wfdb.rdsamp(detail_path + record_name,
                                       channel_names=['i'])

            fs = 200

            signal = processing.resample_sig(signal[:, 0], info['fs'], fs)[0]

            # set some parameters
            window_size_half = int(fs * 0.125 / 2)
            max_bpm = 230

            # detect QRS peaks
            qrs_inds = processing.gqrs_detect(signal, fs=fs)
            search_radius = int(fs * 60 / max_bpm)
            corrected_qrs_inds = processing.correct_peaks(
                signal,
                peak_inds=qrs_inds,
                search_radius=search_radius,
                smooth_window_size=150)

            average_qrs = 0
            count = 0
            for i in range(1, len(corrected_qrs_inds) - 1):
                start_ind = corrected_qrs_inds[i] - window_size_half
                end_ind = corrected_qrs_inds[i] + window_size_half + 1
                if start_ind < corrected_qrs_inds[
                        i - 1] or end_ind > corrected_qrs_inds[i + 1]:
                    continue
                average_qrs = average_qrs + signal[start_ind:end_ind]
                count = count + 1

            # remove outliers
            if count < 8:
                print('\noutlier detected, discard ' + record_name + ' of ' +
                      patient_name)
                continue

            average_qrs = average_qrs / count

            corrcoefs = []
            for i in range(1, len(corrected_qrs_inds) - 1):
                start_ind = corrected_qrs_inds[i] - window_size_half
                end_ind = corrected_qrs_inds[i] + window_size_half + 1
                if start_ind < corrected_qrs_inds[
                        i - 1] or end_ind > corrected_qrs_inds[i + 1]:
                    corrcoefs.append(-100)
                    continue
                corrcoef = pearsonr(signal[start_ind:end_ind], average_qrs)[0]
                corrcoefs.append(corrcoef)

            max_corr = list(map(corrcoefs.index, heapq.nlargest(8, corrcoefs)))

            index_corr = random.sample(
                list(itertools.permutations(max_corr, 8)), 100)

            for index in index_corr:
                # a temp dataframe to store one record
                record_temp = pd.DataFrame()

                signal_temp = []

                for i in index:
                    start_ind = corrected_qrs_inds[i + 1] - window_size_half
                    end_ind = corrected_qrs_inds[i + 1] + window_size_half + 1
                    sig = processing.normalize_bound(signal[start_ind:end_ind],
                                                     -1, 1)
                    signal_temp = np.concatenate((signal_temp, sig))

                record_temp = record_temp.append(pd.DataFrame(
                    signal_temp.reshape(-1, signal_temp.shape[0])),
                                                 ignore_index=True,
                                                 sort=False)
                record_temp['label'] = patient_name
                record_temp['record'] = record_name

                # add it to final dataset
                dataset = dataset.append(record_temp,
                                         ignore_index=True,
                                         sort=False)

        bar.next()

    bar.finish()

    # save for further use
    dataset.to_csv('PTB_dataset.csv', index=False)

    print('processing completed')
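A hypothetical call, assuming the PTB records are unpacked under ./ptb/ with one patientXXX folder per subject; note that data_path is concatenated with the folder name, so the trailing slash matters:

dataGeneration('./ptb/')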
                break
            else:
                print('----')
                time.sleep(timestosleep)        

        i = i + len(m)
        print('{:6d} {:6d} {:7d} {}'.format(i, len(eachunspent), len(unspent), time.time() - qstart))

        time.sleep(3)

    print('get getaddressutxos', len(unspent), time.time() - start)
    print('making txs')

    unspent_sublist = [unspent[i:i + numberofinputs] for i in range(0, len(unspent), numberofinputs)]

    bar = Bar('Processing', max=len(unspent_sublist))
    for x in unspent_sublist:
        amount_total = 0
        inputs = []
        for y in x:
            amount    = round(Decimal(float(y['satoshis'] / 1e8)), 8)
            txid      = y.get('txid')
            vout      = y.get('outputIndex')

            amount_total = amount_total + amount

            input_ = {
                        "txid": txid,
                        "vout": vout
            }
            # now to add the OTU id to each of the headers in the denoiser mapping file
            for iterate_through_mapped_ids in mapped_IDs:
                denoiser_id_from_mapping_with_uclust_OTU_id[
                    iterate_through_mapped_ids] = value
            # so then want to print each matching value with the OTUid and the header

## now match it to the fasta headers in the denoiser.fasta
from Bio import SeqIO

denoiser_records = SeqIO.parse(open(denoiser_fasta), "fasta")  # "rU" mode is gone in Python 3; default text mode is equivalent

from progress.bar import Bar
# want to see that the script is running.

lib_id_with_denoiser_header_and_OTU_number = {}
bar = Bar('Processing', max=len(mapperID))
i = 0
#pbar = ProgressBar(widgets=[Percentage(), Bar()], maxval=len(denoiser_mapping).start()
for denoiser_record in denoiser_records:
    bar.next()
    #print denoiser_record.description
    description_split = denoiser_record.description.split()
    denoiser_record_header_id = description_split[1]
    # print denoiser_record_header_id
    lib_id = denoiser_record.id.split("_")[0]
    #print lib_id
    for key, value in denoiser_id_from_mapping_with_uclust_OTU_id.items():  # .iteritems() is Python 2 only
        if denoiser_record_header_id == key:
            #make new dict with record id, lib and OTU number. Will then summarise afterwards
            lib_id_with_denoiser_header_and_OTU_number[
                denoiser_record_header_id] = (lib_id, value)
def getSingleTraining(file):
    path = os.path.abspath(file)
    pos = path.rfind('/')
    tokens = path[pos + 1:].split('_')
    descriptor_id = tokens[6]
    scene_name = tokens[2]
    scene_name = path[:pos] + '/' + scene_name + '_d.pcd'
    file_descriptor = path[:pos] + '/tmp' + descriptor_id + '.csv'
    labels = np.genfromtxt(file_descriptor,
                           dtype='str',
                           skip_header=1,
                           delimiter=',')
    print('Affordances in descriptor %d' % labels.shape[0])
    fileId = tokens[-1]
    tokens = fileId.split('.')
    fileId = tokens[0]
    # print(fileId)
    # # Need only those affordances that have
    # # over 128 good predictions in this result file

    # res_data_file=path[:pos]+'/'+fileId+'_goodPointsX.pcd'
    # res_points_file=path[:pos]+'/'+fileId+'_goodPoints.pcd'

    # data=load_pcd_data(res_data_file,cols=None)
    # #print(data.shape)
    # points,real_c_data=load_pcd_data_binary(res_points_file)
    # #real_c_data=load_pcd_data(res_points_file,cols=(3,),dataType=np.uint32)
    # #real_c_data=np.array(colors[:,-1],dtype=np.int32)
    # red=np.array((real_c_data>>16)& 0x0000ff,dtype=np.uint8).reshape(-1,1)
    # green=np.array((real_c_data>>8)& 0x0000ff,dtype=np.uint8).reshape(-1,1)
    # blue=np.array((real_c_data)& 0x0000ff,dtype=np.uint8).reshape(-1,1)

    # real_c_data=np.concatenate((red,green,blue),axis=1)

    # perPoint=np.sum(real_c_data,axis=1)
    # bounds=np.cumsum(perPoint)
    # #print(bounds)
    # howMany=np.zeros((labels.shape[0],1),dtype=np.int32)
    # all_data=np.zeros((data.shape[0],6))

    # for i in range(all_data.shape[0]):
    # 	point_id=np.nonzero(bounds>i)[0][0]
    # 	all_data[i,:3]=points[point_id,:]
    # 	all_data[i,3:]=data[i,:3]

    # for i in range(labels.shape[0]):
    # 	success=np.nonzero(all_data[:,3]==i)[0]
    # 	#success2=np.nonzero(all_data[success,2]>0.2)[0]
    # 	howMany[i]=success.size

    # ids_target=np.nonzero(howMany>n_samples)[0]
    # print('Real found: %d'%ids_target.size)
    # print(ids_target)
    #sys.exit()

    new_c = np.genfromtxt('filtered_counts2.csv', delimiter=',', dtype='int')
    with open('file_lists2.csv', 'r') as f:
        reader = csv.reader(f)
        new_n = list(reader)

    samples = 32
    points = 4096
    ids_target = np.nonzero(new_c >= samples)[0]
    print('Actually using %d affordances' % (ids_target.size))

    fig = plt.figure()
    plt.ion()
    ax = fig.add_subplot(121, projection='3d')
    ax2 = fig.add_subplot(122, projection='3d')
    unique_scenes = dict()
    k = 10
    #ax.hold(False)
    if k > 1:
        bar = Bar('Creating original single example training dataset',
                  max=ids_target.shape[0])
        for i in range(ids_target.shape[0]):
            interaction = ids_target[i]
            path_to_data = os.path.abspath('../data')
            name = path_to_data + '/affordances/binaryOc_AffordancesDataset_train' + str(
                interaction) + '_' + str(TRAIN_EXAMPLES) + '.h5'
            if os.path.exists(name):
                continue
            #find training data
            aff_dir = labels[interaction, 0]
            query_object = labels[interaction, 2]
            data_file = path[:pos] + "/" + aff_dir + "/ibs_full_" + labels[
                interaction, 1] + "_" + query_object + ".txt"
            with open(data_file) as f:
                content = f.readlines()
                # you may also want to remove whitespace characters like `\n` at the end of each line
            content = [x.strip() for x in content]
            scene_file = content[0].split(":")[1]
            tmp = content[8].split(":")[1]
            datapoint = tmp.split(',')
            test_point = np.expand_dims(np.asarray(
                [float(x) for x in datapoint]),
                                        axis=0)
            data_file = path[:pos] + "/" + aff_dir + "/" + scene_file
            if '.pcd' in scene_file or '.ply' in scene_file:
                if os.path.exists(data_file):
                    data_file = data_file
            else:
                try_data_file = data_file + '.ply'
                if os.path.exists(try_data_file):
                    #print(try_data_file)
                    data_file = try_data_file
                #maybe pcd extension missing
                else:
                    try_data_file = data_file + '.pcd'
                    if os.path.exists(try_data_file):
                        data_file = try_data_file
            # if scene_file not in unique_scenes:
            # 	unique_scenes[scene_file]=interaction
            # else:
            # 	continue
            if '.pcd' in data_file:
                cloud_training = load_pcd_data(data_file)
            else:
                cloud_training = load_ply_data(data_file)
            data = np.zeros((2, n_points, 3), dtype=np.float32)
            data_labels = np.zeros((2, 1), dtype=np.int32)
            boundingBoxDiag = np.linalg.norm(
                np.min(cloud_training, 0) - np.max(cloud_training, 0))
            #print('%s Diagonal %f Points %d'%(scene_file,boundingBoxDiag,cloud_training.shape[0]))
            #sample a voxel with rad from test-point
            kdt = BallTree(cloud_training, leaf_size=5, metric='euclidean')
            voxel_ids = getVoxel(test_point, max_rad, kdt)
            voxel = cloud_training[voxel_ids, :]
            sample = sample_cloud(voxel, n_points)
            sample_cloud_training = sample_cloud(cloud_training, n_points * 2)
            # generate a negative example with noise around test_point
            low = test_point[0, 0] - max_rad
            high = test_point[0, 0] + max_rad
            tmp1 = (high - low) * np.random.random_sample(
                (n_points, 1)) + (low)
            low = test_point[0, 1] - max_rad
            high = test_point[0, 1] + max_rad
            tmp2 = (high - low) * np.random.random_sample(
                (n_points, 1)) + (low)
            low = test_point[0, 2] - max_rad
            high = test_point[0, 2] + max_rad
            tmp3 = (high - low) * np.random.random_sample(
                (n_points, 1)) + (low)
            negative_cloud_training = np.concatenate((tmp1, tmp2, tmp3),
                                                     axis=1)
            data[0, ...] = sample - test_point
            data_labels[0, ...] = np.zeros((1, 1), dtype=np.int32)
            data[1, ...] = negative_cloud_training - test_point
            data_labels[1, ...] = np.ones((1, 1), dtype=np.int32)
            #name=path_to_data+'/affordances/binaryOc_AffordancesDataset_train'+str(interaction)+'_'+str(TRAIN_EXAMPLES)+'.h5'
            #print(name)
            save_h5(name, data, data_labels, 'float32', 'uint8')
            ax.scatter(sample_cloud_training[:, 0],
                       sample_cloud_training[:, 1],
                       sample_cloud_training[:, 2],
                       s=1,
                       c='b')
            ax.scatter(sample[:, 0], sample[:, 1], sample[:, 2], s=3, c='b')
            ax2.scatter(negative_cloud_training[:, 0],
                        negative_cloud_training[:, 1],
                        negative_cloud_training[:, 2],
                        s=3,
                        c='r')
            plt.pause(1)
            plt.draw()
            ax.clear()
            ax2.clear()
            bar.next()
        bar.finish()
    name = '../data/affordances/names.txt'
    with open(name, "w") as text_file:
        for i in range(ids_target.shape[0]):
            text_file.write(
                "%d:%s-%s\n" %
                (i, labels[ids_target[i], 0], labels[ids_target[i], 2]))
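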
    def save_samples(self, purpose_hdf5_group: h5py.Group,
                     logdir: pathlib.Path):
        logdir.mkdir(exist_ok=True, parents=True)
        data_hdf5_group = purpose_hdf5_group["data"]
        dataset_length = len(data_hdf5_group[ChannelEnum.REC_DEM.value])
        num_samples = int(dataset_length / self.config["sample_frequency"])

        progress_bar = Bar(f"Plot samples for {str(purpose_hdf5_group.name)}",
                           max=num_samples)
        for sample_idx in range(num_samples):
            idx = sample_idx * self.config["sample_frequency"]
            res_grid = data_hdf5_group[ChannelEnum.RES_GRID.value][idx, ...]
            rel_position = data_hdf5_group[ChannelEnum.REL_POSITION.value][idx,
                                                                           ...]
            rec_dem = data_hdf5_group[ChannelEnum.REC_DEM.value][idx, ...]
            occluded_elevation_map = data_hdf5_group[
                ChannelEnum.OCC_DEM.value][idx, ...]
            comp_dem = data_hdf5_group[ChannelEnum.COMP_DEM.value][idx, ...]

            gt_dem = None
            if ChannelEnum.GT_DEM.value in data_hdf5_group:
                gt_dem = data_hdf5_group[ChannelEnum.GT_DEM.value][idx, ...]

            non_occluded_elevation_map = occluded_elevation_map[
                ~np.isnan(occluded_elevation_map)]

            rec_data_um = None
            if ChannelEnum.REC_DATA_UM.value in data_hdf5_group:
                rec_data_um = data_hdf5_group[ChannelEnum.REC_DATA_UM.value][
                    idx, ...]
            comp_data_um = None
            if ChannelEnum.COMP_DATA_UM.value in data_hdf5_group:
                comp_data_um = data_hdf5_group[ChannelEnum.COMP_DATA_UM.value][
                    idx, ...]
            model_um = None
            if ChannelEnum.MODEL_UM.value in data_hdf5_group:
                model_um = data_hdf5_group[ChannelEnum.MODEL_UM.value][idx,
                                                                       ...]
            total_um = None
            if ChannelEnum.TOTAL_UM.value in data_hdf5_group:
                total_um = data_hdf5_group[ChannelEnum.TOTAL_UM.value][idx,
                                                                       ...]

            rec_dems = None
            if ChannelEnum.REC_DEMS.value in data_hdf5_group:
                rec_dems = data_hdf5_group[ChannelEnum.REC_DEMS.value][idx,
                                                                       ...]
            comp_dems = None
            if ChannelEnum.COMP_DEMS.value in data_hdf5_group:
                comp_dems = data_hdf5_group[ChannelEnum.COMP_DEMS.value][idx,
                                                                         ...]

            u = int(
                round(occluded_elevation_map.shape[0] / 2 +
                      rel_position[0] / res_grid[0]))
            v = int(
                round(occluded_elevation_map.shape[1] / 2 +
                      rel_position[1] / res_grid[1]))
            # we only visualize the robot position if it's inside the elevation map
            plot_robot_position = 0 < u < occluded_elevation_map.shape[
                0] and 0 < v < occluded_elevation_map.shape[1]
            if plot_robot_position:
                robot_position_pixel = np.array([u, v])
            else:
                robot_position_pixel = None
            indiv_vranges = self.config.get("indiv_vranges", True)

            # 2D
            if indiv_vranges is False:
                elevation_vmin = np.min(
                    [np.min(rec_dem),
                     np.min(comp_dem[~np.isnan(comp_dem)])])
                elevation_vmax = np.max(
                    [np.max(rec_dem),
                     np.max(comp_dem[~np.isnan(comp_dem)])])

                if non_occluded_elevation_map.size != 0:
                    elevation_vmin = np.min(
                        [elevation_vmin,
                         np.min(non_occluded_elevation_map)])
                    elevation_vmax = np.max(
                        [elevation_vmax,
                         np.max(non_occluded_elevation_map)])

                if gt_dem is not None and not np.isnan(gt_dem).all():
                    ground_truth_dem_vmin = np.min(gt_dem[~np.isnan(gt_dem)])
                    ground_truth_dem_vmax = np.max(gt_dem[~np.isnan(gt_dem)])
                    elevation_vmin = np.min(
                        [elevation_vmin, ground_truth_dem_vmin])
                    elevation_vmax = np.max(
                        [elevation_vmax, ground_truth_dem_vmax])
            else:
                elevation_vmin = None
                elevation_vmax = None

            elevation_cmap = plt.get_cmap("viridis")

            fig, axes = plt.subplots(nrows=2, ncols=2, figsize=[12, 10])
            # axes = np.expand_dims(axes, axis=0)

            if gt_dem is not None:
                axes[0, 0].set_title("Ground-truth")
                # matshow plots x and y swapped
                mat = axes[0, 0].matshow(np.swapaxes(gt_dem, 0, 1),
                                         vmin=elevation_vmin,
                                         vmax=elevation_vmax,
                                         cmap=elevation_cmap)
                if indiv_vranges:
                    fig.colorbar(mat, ax=axes[0, 0], fraction=0.08)

            axes[0, 1].set_title("Reconstruction")
            # matshow plots x and y swapped
            mat = axes[0, 1].matshow(np.swapaxes(rec_dem, 0, 1),
                                     vmin=elevation_vmin,
                                     vmax=elevation_vmax,
                                     cmap=elevation_cmap)
            if indiv_vranges:
                fig.colorbar(mat, ax=axes[0, 1], fraction=0.08)
            axes[1, 0].set_title("Composition")
            # matshow plots x and y swapped
            mat = axes[1, 0].matshow(np.swapaxes(comp_dem, 0, 1),
                                     vmin=elevation_vmin,
                                     vmax=elevation_vmax,
                                     cmap=elevation_cmap)
            if indiv_vranges:
                fig.colorbar(mat, ax=axes[1, 0], fraction=0.08)
            axes[1, 1].set_title("Occlusion")
            # matshow plots x and y swapped
            mat = axes[1, 1].matshow(np.swapaxes(occluded_elevation_map, 0, 1),
                                     vmin=elevation_vmin,
                                     vmax=elevation_vmax,
                                     cmap=elevation_cmap)
            if indiv_vranges:
                fig.colorbar(mat, ax=axes[1, 1], fraction=0.08)

            if indiv_vranges is False:
                fig.colorbar(mat, ax=axes.ravel().tolist(), fraction=0.045)

            for i, ax in enumerate(axes.reshape(-1)):
                if plot_robot_position:
                    ax.plot([u], [v], marker="*", color="red")

                # Hide grid lines
                ax.grid(False)

            plt.draw()
            plt.savefig(str(logdir / f"sample_2d_{idx}.pdf"))
            if self.remote is not True:
                plt.show()
            plt.close()

            # 3D
            fig = plt.figure(figsize=[2 * 6.4, 1 * 4.8])
            plt.clf()
            axes = []
            num_cols = 3

            x_3d = np.arange(
                start=-int(occluded_elevation_map.shape[0] / 2),
                stop=int(occluded_elevation_map.shape[0] / 2)) * res_grid[0]
            y_3d = np.arange(
                start=-int(occluded_elevation_map.shape[1] / 2),
                stop=int(occluded_elevation_map.shape[1] / 2)) * res_grid[1]
            x_3d, y_3d = np.meshgrid(x_3d, y_3d)

            axes.append(
                fig.add_subplot(100 + num_cols * 10 + 1, projection="3d"))
            # the np.NaNs in the occluded elevation maps give us these warnings:
            warnings.filterwarnings("ignore", category=UserWarning)
            if gt_dem is not None:
                axes[0].set_title("Ground-truth")
                axes[0].plot_surface(x_3d,
                                     y_3d,
                                     gt_dem,
                                     vmin=elevation_vmin,
                                     vmax=elevation_vmax,
                                     cmap=elevation_cmap)
            axes.append(
                fig.add_subplot(100 + num_cols * 10 + 2, projection="3d"))
            axes[1].set_title("Reconstruction")
            axes[1].plot_surface(x_3d,
                                 y_3d,
                                 rec_dem,
                                 vmin=elevation_vmin,
                                 vmax=elevation_vmax,
                                 cmap=elevation_cmap)
            axes.append(
                fig.add_subplot(100 + num_cols * 10 + 3, projection="3d"))
            axes[2].set_title("Occlusion")
            axes[2].plot_surface(x_3d,
                                 y_3d,
                                 occluded_elevation_map,
                                 vmin=elevation_vmin,
                                 vmax=elevation_vmax,
                                 cmap=elevation_cmap)
            warnings.filterwarnings("default", category=UserWarning)
            fig.colorbar(mat, ax=axes, fraction=0.015)

            for i, ax in enumerate(axes):
                if plot_robot_position:
                    ax.scatter([rel_position[0]], [rel_position[1]],
                               [rel_position[2]],
                               marker="*",
                               color="red")
                ax.set_xlabel("x [m]")
                ax.set_ylabel("y [m]")
                ax.set_zlabel("z [m]")

                # Hide grid lines
                ax.grid(False)

            plt.draw()
            plt.savefig(str(logdir / f"sample_3d_{idx}.pdf"))
            if self.remote is not True:
                plt.show()
            plt.close()

            if gt_dem is not None \
                    or rec_data_um is not None or model_um is not None:
                draw_error_uncertainty_plot(
                    idx,
                    logdir,
                    gt_dem=gt_dem,
                    rec_dem=rec_dem,
                    comp_dem=comp_dem,
                    rec_data_um=rec_data_um,
                    comp_data_um=comp_data_um,
                    model_um=model_um,
                    total_um=total_um,
                    robot_position_pixel=robot_position_pixel,
                    remote=self.remote,
                    indiv_vranges=indiv_vranges)

            if rec_dems is not None:
                draw_solutions_plot(idx,
                                    logdir,
                                    ChannelEnum.REC_DEMS,
                                    rec_dems,
                                    robot_position_pixel=robot_position_pixel,
                                    remote=self.remote)

            if comp_dems is not None:
                draw_solutions_plot(idx,
                                    logdir,
                                    ChannelEnum.COMP_DEMS,
                                    comp_dems,
                                    robot_position_pixel=robot_position_pixel,
                                    remote=self.remote)

            if ChannelEnum.REC_TRAV_RISK_MAP.value in data_hdf5_group \
                    and ChannelEnum.COMP_TRAV_RISK_MAP.value in data_hdf5_group:
                rec_trav_risk_map = data_hdf5_group[
                    ChannelEnum.REC_TRAV_RISK_MAP.value][idx, ...]
                comp_trav_risk_map = data_hdf5_group[
                    ChannelEnum.COMP_TRAV_RISK_MAP.value][idx, ...]
                draw_traversability_plot(
                    idx,
                    logdir,
                    gt_dem=gt_dem,
                    rec_dem=rec_dem,
                    comp_dem=comp_dem,
                    rec_data_um=rec_data_um,
                    comp_data_um=comp_data_um,
                    model_um=model_um,
                    total_um=total_um,
                    rec_trav_risk_map=rec_trav_risk_map,
                    comp_trav_risk_map=comp_trav_risk_map,
                    robot_position_pixel=robot_position_pixel,
                    remote=self.remote)

            progress_bar.next()
        progress_bar.finish()
def computeResultStats(descriptor_id):
    file_ids = getResults(descriptor_id)
    print('Found %d actual results' % (len(file_ids)))
    path = os.path.abspath(result_dirs[0])
    print(path)
    file_descriptor = path + '/tmp' + str(descriptor_id) + '.csv'
    labels = np.genfromtxt(file_descriptor,
                           dtype='str',
                           skip_header=1,
                           delimiter=',')
    print('Affordances in descriptor %d' % labels.shape[0])
    counts = np.zeros((labels.shape[0], 1), dtype=np.int32)
    countsFile = "Counts_" + str(descriptor_id) + ".csv"
    if 'some_counts' not in globals():
        # collect some data about affordances found here
        counter = 0
        bar = Bar('Creating new data', max=len(file_ids))
        for file_id in file_ids:
            #read results
            some_results = file_ids[file_id] + file_id + "_goodPointsX.pcd"
            #print('File to read: %s'%some_results)
            some_results_points = file_ids[
                file_id] + file_id + "_goodPoints.pcd"
            newDataName = file_ids[file_id] + file_id + "_newData.csv"
            #if not os.path.exists(newDataName):
            try:
                # read_routine=1
                # with open(some_results_points) as fp:
                # 	for i, line in enumerate(fp):
                # 		if i == 10:
                # 			words=line.split(" ")
                # 			if words[1]!="ascii":
                # 				read_routine=2
                # 			break
                data, _ = load_pcd_data_binary(some_results)
                points, real_c_data = load_pcd_data_binary(some_results_points)
            except Exception as e:
                print('Encoding error in %s' % (file_ids[file_id] + file_id))
                bar.next()  # advance the bar before skipping this file
                continue

            #real_c_data=np.array(colors[:,-1],dtype=np.int32)
            red = np.array((real_c_data >> 16) & 0x0000ff,
                           dtype=np.uint8).reshape(-1, 1)
            green = np.array((real_c_data >> 8) & 0x0000ff,
                             dtype=np.uint8).reshape(-1, 1)
            blue = np.array((real_c_data) & 0x0000ff,
                            dtype=np.uint8).reshape(-1, 1)
            real_c_data = np.concatenate((red, green, blue), axis=1)
            perPoint = np.sum(real_c_data, axis=1)
            bounds = np.cumsum(perPoint)

            #Only get points above a height
            minZ = np.min(points[:, 2])

            all_data = np.zeros((data.shape[0], 6))
            start_id = 0
            end_id = bounds[0]
            for i in range(bounds.shape[0]):
                if i > 0:
                    start_id = bounds[i - 1]
                else:
                    start_id = 0
                end_id = bounds[i]
                all_data[start_id:end_id, :3] = points[i, :]
                all_data[start_id:end_id, 3:] = data[start_id:end_id, :3]

            valid_ids = np.nonzero(all_data[:, Z] >= (minZ + 0.3))[0]
            data = all_data[valid_ids, :]
            np.savetxt(newDataName, data, delimiter=",", fmt='%1.6f')
            #else:
            #data=np.genfromtxt(newDataName,delimiter=",",dtype='float32')
            #np.savetxt(newDataName,data,delimiter=",",fmt='%1.6f')
            counter += 1
            counts_tmp = np.bincount(data[:, A_ID].astype(int),
                                     minlength=counts.shape[0])
            counts_tmp = np.expand_dims(counts_tmp, axis=1)
            counts += counts_tmp
            bar.next()
        bar.finish()
    else:
        counts = some_counts
    with open(countsFile, "w") as text_file:
        for i in range(labels.shape[0]):
            text_file.write("%d,%s-%s,%d\n" %
                            (i, labels[i, 0], labels[i, 2], counts[i]))
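A hypothetical call, assuming the result_dirs global already points at the directories holding tmp<id>.csv and the *_goodPoints*.pcd result files; 42 is a placeholder descriptor id:

computeResultStats(42)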
Example #28
    def _train_one_epoch(self):
        bar = Bar('Processing', max=len(self.train_data))
        for step, (data, label) in enumerate(self.train_data):

            self.sigma = hm_kernel_size(self.hm_type,
                                        self.last_epoch,
                                        threshold=4)
            target = gene_heatmap(label, self.sigma)
            inputs = Variable(data)
            target = Variable(t.from_numpy(target))
            if len(self.params.gpus) > 0:
                inputs = inputs.cuda()
                target = target.type(t.FloatTensor).cuda()

            # forward
            score = self.model(inputs)
            loss = 0

            # stack hourglass
            for s in range(len(score)):
                loss += self.criterion(score[s], target)
            loss = loss / len(score)

            # simple pose res
            # loss = self.criterion(score[1], target)

            # backward
            self.optimizer.zero_grad()
            loss.backward()
            self.optimizer.step(None)

            # meters update
            self.loss_meter.add(loss.item())

            # evaluation: calculate PCKh
            predictions = spatial_soft_argmax2d(score[len(score) - 1], 1000,
                                                False).cpu().numpy().reshape(
                                                    -1, 2)
            targetcoors = label.numpy().reshape(-1, 2)
            steppckh, steperr = evalPCKh(predictions,
                                         targetcoors,
                                         threshold=50,
                                         alpha=0.2)

            # tensorboard show
            if step % 500 == 0:
                target_shows = t.sum(target[0], 0)
                target_shows[target_shows > 1] = 1
                self.writer.add_image('train/input', inputs[0],
                                      self.last_epoch)
                self.writer.add_image('train/target',
                                      target_shows,
                                      self.last_epoch,
                                      dataformats='HW')
                self.writer.add_image('train/output',
                                      t.sum(score[1][0], 0),
                                      self.last_epoch,
                                      dataformats='HW')

            bar.suffix = 'Train: [%(index)d/%(max)d] | Epoch: [{0}/{1}]| Loss: {loss:6f} | PCKh: {pckh:4f} | AveErr: {err:.2f} pixel |'.format(
                self.last_epoch,
                self.params.max_epoch,
                loss=loss,
                pckh=steppckh,
                err=steperr)
            bar.next()
        bar.finish()
Example #29
from PIL import Image, ImageDraw
from glob import glob
from progress.bar import Bar

categories = ["Ball", "Vase", "Corona", "Red", "Crown", "Grey_white"]
colors = ["lime", "white", "orange", "red", "cyan", "pink"]

files = set(glob("data/test/*.jpg"))

#with open("data/test.txt") as f:
#    files = [l.rstrip("\n") for l in f]

print(files)
#files = [l.replace("data/data", "pre_data") for l in files]

progress = Bar('Processing', max=len(files))
for f in files:
    progress.next()
    source = Image.open(f)
    draw = ImageDraw.Draw(source)

    img_width, img_height = source.size

    data = open(f.replace(".jpg", ".txt"))
    for d in data:
        params = d.split(" ")
        params = [float(p) for p in params]
        params[0] = int(params[0])

        color = colors[params[0]]
        x_center = int(params[1] * img_width)
Example #30
def modulate_fm(x,
                fsBB,
                fsIF,
                del_f=75000,
                BB_BW=15000,
                BW=200000,
                A=1,
                debug=False,
                preemph=True,
                fc=0,
                progress=False):
    '''
  SEEMS TO WORK ALRIGHT

  Modulates some signal x with del_f maximum frequency deviation.  The maximum
  message value mp is extracted from x.

  x: The signal to modulate, a 1D np array
  fsBB: The sample rate of the signal x
  fsIF: The sample rate for the modulation
  del_f: delta f, the maximum frequency deviation
  BB_BW: The baseband bandwidth that x is low-pass filtered to
  fc: The centre frequency for the modulation
  BW: The final maximum bandwidth of the signal
  A: The amplitude of the output fm modulated signal
  Returns: The fm modulated signal and the frequency sensitivity kf
  '''
    #Convert everything to float...
    fsBB = float(fsBB)
    fsIF = float(fsIF)
    del_f = float(del_f)
    BW = float(BW)
    A = float(A)

    if progress:
        bar = Bar('FM Modulating ...', max=6)
        bar.next()

    taps = 65
    right_edge = BB_BW / fsBB
    b = remez(taps, [0, right_edge * .95, right_edge * .97, 0.5], [1, 0],
              type='bandpass',
              maxiter=100,
              grid_density=32)
    a = 1
    BB = lfilter(b, a, x)
    if progress:
        bar.next()

    if debug == True:
        fig = plt.figure()
        spec_plot(BB, fsBB, fig, sub_plot=(2, 2, 1), plt_title='BB')

    #Perform the modulation, as well as upsampling to fsIF
    T = len(BB) / fsBB  #The period of time x exists for
    N = fsIF * T  #The number of samples for the RF modulation
    if fsBB != fsIF:
        BB = resample(BB, int(N))  # resample expects an integer sample count
    mp = max(BB)
    kf = (2. * pi * del_f) / mp
    if progress:
        bar.next()

    #Preemphasis filtering
    if preemph is True:
        taps = 65
        f1 = 2100.
        f2 = 30000.
        G = f2 / f1
        b = remez(taps, [0, f1 / fsIF, f2 / fsIF, 0.5], [1, G],
                  type='bandpass',
                  maxiter=100,
                  grid_density=32)
        a = 1
        BB = lfilter(b, a, BB)

        if debug == True:
            spec_plot(BB,
                      fsIF,
                      fig,
                      sub_plot=(2, 2, 2),
                      plt_title='Preemphasized BB')

    if progress:
        bar.next()
    #FM modulation
    t = linspace(0, T, len(BB))
    BB_integral = cumtrapz(BB, dx=1. / len(BB), initial=0.)
    fm_modIF = A * cos(2 * pi * fc * t + kf * BB_integral)

    DC = np.average(fm_modIF)
    fm_modIF = fm_modIF - DC

    if debug == True:
        spec_plot(fm_modIF,
                  fsIF,
                  fig,
                  sub_plot=(2, 2, 3),
                  plt_title='Modulated')
    if progress:
        bar.next()

    #Bandwidth limiting
    left_edge = (fc - (BW / 2.)) / fsIF
    right_edge = (fc + (BW / 2.)) / fsIF
    taps = 165
    if left_edge <= 0:
        if right_edge == 0.5:
            # the band already covers the whole spectrum: all-pass
            bands = [0, 0.5]
            gains = [1]
        else:
            bands = [0, right_edge * .97, right_edge * .99, 0.5]
            gains = [1, 0]
    elif right_edge == 0.5:
        bands = [0, left_edge * 1.01, left_edge * 1.03, 0.5]
        gains = [0, 1]
    else:
        bands = [
            0, left_edge * 1.01, left_edge * 1.05, right_edge * .95,
            right_edge * .99, 0.5
        ]
        gains = [0, 1, 0]

    b = remez(taps,
              bands,
              gains,
              type='bandpass',
              maxiter=1000,
              grid_density=32)
    a = 1
    fm_modIF = lfilter(b, a, fm_modIF)
    if progress:
        bar.next()

    if debug == True:
        spec_plot(fm_modIF,
                  fsIF,
                  fig,
                  sub_plot=(2, 2, 4),
                  plt_title='Transmitted')
        fig.show()

    mx = max(max(fm_modIF), abs(min(fm_modIF)))
    fm_modIF = fm_modIF / mx  #normalize to 1
    if progress:
        bar.finish()
    return fm_modIF, kf
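A hypothetical call, FM-modulating one second of a 1 kHz test tone sampled at 44.1 kHz up to a 1 MHz IF rate with a 250 kHz centre frequency (all values are illustrative):

import numpy as np

fsBB = 44100.0
t = np.linspace(0, 1.0, int(fsBB), endpoint=False)
tone = np.cos(2 * np.pi * 1000.0 * t)  # 1 kHz baseband test tone

fm_signal, kf = modulate_fm(tone, fsBB, fsIF=1e6, fc=250e3, progress=True)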