Example #1
0
def tag_images():
    """
    reads all the rows from read_csv_file into a list, displays one image url and name to the user at a time
    using a global counter, and keeps going until the end of the list is reached. At this point, a "Done Template"
    will be displayed, the counter will be reset, and the read_csv_file will be deleted. Once a user submits
    a tag, it will be appended to the corresponding list data item, and then written out to the write_csv_file.

    :return: rendered form with list(image_url, image_name) as context
    """
    global LIST_INDEX

    # create a sample read_csv_file using the image_list_data at the top
    # list_to_csv(csv_read_file, sample_data_list)

    # read the read_csv_file into a list
    IMAGE_DATA_LIST = csv_to_list(csv_read_file)

    image_data = IMAGE_DATA_LIST[LIST_INDEX]

    # Only a POSTed form counts as a submission; a bare GET just re-renders
    # the current image.
    if request.form:
        # Collect ALL submitted tag values into one new row first...
        new_image_data = image_data[:]
        for value in request.form.values():
            if value is not None:
                new_image_data.append(value)

        # ...then record the row and advance the counter exactly ONCE per
        # request.  (Previously this ran once per form field, appending the
        # half-built row repeatedly and skipping images when the form had
        # more than one field.)
        CSV_WRITE_DATA.append(new_image_data)
        LIST_INDEX += 1
        if LIST_INDEX == len(IMAGE_DATA_LIST):
            # End of the list: flush everything tagged so far, reset the
            # counter, and show the done page.
            LIST_INDEX = 0
            list_to_csv(csv_write_file, CSV_WRITE_DATA)
            return render_template('done.html')
        image_data = IMAGE_DATA_LIST[LIST_INDEX]

    return render_template('show_image.html', image_data=image_data)
Example #2
0
 def get_db_prep_value(self, value, connection, prepared=False):
     """Prepare *value* for the database backend.

     On PostgreSQL engines the value is prepared element-wise with the
     underlying field type; on any other engine it is serialised to CSV
     text and delegated to the companion text field.

     :param value: the python value (typically a list/tuple) to prepare
     :param connection: the DB connection whose engine decides the path
     :param prepared: if True, ``get_prep_value`` has already been applied
     :return: a backend-ready value (list of prepared items, or text)
     """
     value = value if prepared else self.get_prep_value(value)

     # Falsy values (None, empty list, '') pass through untouched.
     if not value:
         return value
     # `x not in y` instead of `not x in y` (PEP 8 / E713).
     if connection.settings_dict['ENGINE'] not in PG_ENGINES:
         value = list_to_csv(value)
         # NOTE(review): called without the `connection` argument — modern
         # Django's Field.get_db_prep_value requires it; confirm the
         # signature of self._textfield's implementation.
         return self._textfield.get_db_prep_value(value)

     get_db_prep_value = self._fieldtype.get_db_prep_value

     # Prepare each element individually for array-typed values.
     if isinstance(value, (list, tuple)):
         return [get_db_prep_value(v, connection, True) for v in value]
     else:
         return get_db_prep_value(value)
Example #3
0
    def get_db_prep_value(self, value, connection, prepared=False):
        """Prepare *value* for the backend: element-wise via the array's
        field type on PostgreSQL, or CSV text via the companion text field
        on every other engine. Falsy values are returned as-is."""
        if not prepared:
            value = self.get_prep_value(value)

        if not value:
            return value

        engine = connection.settings_dict['ENGINE']
        if engine not in PG_ENGINES:
            # Non-PostgreSQL backend: fall back to a CSV text column.
            return self._textfield.get_db_prep_value(list_to_csv(value))

        prep = self._fieldtype.get_db_prep_value
        if isinstance(value, (list, tuple)):
            return [prep(item, connection, True) for item in value]
        return prep(value)
Example #4
0
def ignore_affliction(line, time, backrefs):
	"""Command handler: with no argument, echo the current list of ignored
	afflictions; with an argument, validate it and add it to the list.

	:param line: the full matched input line (unused here)
	:param time: the trigger timestamp (unused here)
	:param backrefs: regex backreferences; backrefs[1] is the argument text
	"""
	args = backrefs[1]

	# No argument given: just report the current ignore list.
	if not args:
		if not player.ignored_afflictions:
			core.echo("You aren't ignoring any afflictions.")
		else:
			affs = utils.list_to_csv(player.ignored_afflictions)
			core.echo("Currently ignoring: " + affs + ".")
		return

	arg = args.strip()

	# Membership test directly on the dict — `.keys()` is redundant.
	if arg not in player.afflictions:
		core.echo("That's not a valid affliction.")
		return

	player.ignored_afflictions.append(arg)
	core.echo("Added " + arg + " to ignored afflictions.")
Example #5
0
 def lookup_users(self, user_ids=None, screen_names=None):
     """Look up users in bulk, joining the id / screen-name lists into the
     comma-separated form the underlying API call expects."""
     ids_csv = list_to_csv(user_ids)
     names_csv = list_to_csv(screen_names)
     return self._lookup_users(ids_csv, names_csv)
Example #6
0
File: api.py  Project: DonghoChoi/Socrates
 def lookup_friendships(self, user_ids=None, screen_names=None):
     """Look up friendship status in bulk; both list arguments are joined
     into comma-separated strings before the underlying API call."""
     ids_csv = list_to_csv(user_ids)
     names_csv = list_to_csv(screen_names)
     return self._lookup_friendships(ids_csv, names_csv)
Example #7
0
 def render(self, name, value, attrs=None):
     """Render the widget; a non-None value is CSV-encoded first so the
     underlying text widget can display it."""
     csv_value = value if value is None else list_to_csv(value)
     return super(PgArrayWidget, self).render(name, csv_value, attrs)
Example #8
0
def validate(val_loader, model, criterion, args, prefix='val_'):
    """Run one full validation pass over ``val_loader``.

    Iterates the loader through ``data_prefetcher`` under ``torch.no_grad``,
    accumulating loss and the absolute cartesian/angle errors, writes the
    per-sample error lists to CSV files under ``args.save`` (gated by
    ``args.feature_mode``), and returns a scalar metric plus a stats dict.

    :param val_loader: validation data loader; must yield >= 2 batches
    :param model: network taking (image, vector) inputs, returning (output, aux)
    :param criterion: loss applied to (output, target)
    :param args: parsed CLI args (distributed, local_rank, save, print_freq, ...)
    :param prefix: filename prefix for the error CSV outputs
    :return: (cart_weight-blended mean error, stats dict)
    :raises ValueError: if the loader has fewer than 2 batches
    """
    loader_len = len(val_loader)
    if loader_len < 2:
        raise ValueError(
            'val_loader only supports 2 or more batches and loader_len: ' +
            str(loader_len))
    batch_time = AverageMeter()
    data_time = AverageMeter()
    speed = AverageMeter()
    losses = AverageMeter()
    abs_cart_m = AverageMeter()
    abs_angle_m = AverageMeter()

    # switch to evaluate mode
    model.eval()

    end = time.time()

    cart_error, angle_error = [], []
    prefetcher = data_prefetcher(val_loader)
    input_img, input_vec, target = prefetcher.next()
    # NOTE(review): batch_size is read once from the FIRST batch and reused
    # as the weight for every meter update below; a smaller final batch is
    # therefore over-weighted — confirm this is intended.
    batch_size = input_img.size(0)
    i = -1
    # Only rank 0 owns a progress bar; all other ranks keep progbar=None.
    if args.local_rank == 0:
        progbar = tqdm(total=loader_len)
    else:
        progbar = None
    # The prefetcher signals exhaustion by returning None for the image.
    while input_img is not None:
        i += 1
        # measure data loading time
        data_time.update(time.time() - end)

        # compute output
        with torch.no_grad():
            # output = model(input)
            # loss = criterion(output, target)
            # note here the term output is equivalent to logits
            output, _ = model(input_img, input_vec)
            loss = criterion(output, target)

        # measure accuracy and record loss
        batch_abs_cart_distance, batch_abs_angle_distance = accuracy(
            output.data.cpu().numpy(),
            target.data.cpu().numpy())
        abs_cart_f, abs_angle_f = np.mean(batch_abs_cart_distance), np.mean(
            batch_abs_angle_distance)
        # Keep every per-sample error so the full distributions can be
        # written to CSV after the loop.
        cart_error.extend(batch_abs_cart_distance)
        angle_error.extend(batch_abs_angle_distance)

        if args.distributed:
            reduced_loss = reduce_tensor(loss.data)
            # NOTE(review): abs_cart_f / abs_angle_f are numpy scalars at
            # this point; reduce_tensor presumably expects a torch tensor —
            # verify this path under args.distributed.
            abs_cart_f = reduce_tensor(abs_cart_f)
            abs_angle_f = reduce_tensor(abs_angle_f)
        else:
            reduced_loss = loss.data

        losses.update(reduced_loss, batch_size)
        abs_cart_m.update(abs_cart_f, batch_size)
        abs_angle_m.update(abs_angle_f, batch_size)

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

        if args.local_rank == 0:
            progbar.update()
        # Refresh the textual status line every print_freq batches (rank 0).
        if args.local_rank == 0 and i % args.print_freq == 0:
            speed.update(args.world_size * args.batch_size / batch_time.val)
            progbar.set_description(
                # 'Test: [{0}/{1}]\t'
                'Valid (cur/avg)  '
                'batch_t: {batch_time.val:.3f}/{batch_time.avg:.3f}, '
                'img/s: {0:.1f}/{1:.1f}, '
                'loss: {loss.val:.4f}/{loss.avg:.4f}, '
                'abs_cart: {abs_cart.val:.2f}/{abs_cart.avg:.2f}, '
                'abs_angle: {abs_angle.val:.2f}/{abs_angle.avg:.2f}, prog'.
                format(
                    #    i, len(val_loader),
                    speed.val,
                    speed.avg,
                    batch_time=batch_time,
                    loss=losses,
                    abs_cart=abs_cart_m,
                    abs_angle=abs_angle_m))

        input_img, input_vec, target = prefetcher.next()

    # logger.info(' * combined_error {combined_error.avg:.3f} top5 {top5.avg:.3f}'
    #       .format(combined_error=combined_error, top5=top5))
    if args.feature_mode != 'rotation_only':  # translation_only or all_features: save cartesian csv
        utils.list_to_csv(
            os.path.join(args.save,
                         prefix + args.abs_cart_error_output_csv_name),
            cart_error)
    if args.feature_mode != 'translation_only':  # rotation_only or all_features: save angle csv
        utils.list_to_csv(
            os.path.join(args.save,
                         prefix + args.abs_angle_error_output_csv_name),
            angle_error)
    stats = get_stats(progbar, prefix, args, batch_time, data_time, abs_cart_m,
                      abs_angle_m, losses, speed)
    if progbar is not None:
        progbar.close()
        del progbar

    # Return the weighted sum of absolute cartesian and angle errors as the metric
    return (args.cart_weight * abs_cart_m.avg +
            (1 - args.cart_weight) * abs_angle_m.avg), stats
Example #9
0
def train(train_loader, model, criterion, optimizer, epoch, args):
    """Train the model for one epoch over ``train_loader``.

    Iterates the loader through ``data_prefetcher`` (with optional cutout
    augmentation), applies sigmoid to the model output before the loss,
    optionally adds an auxiliary-head loss, steps the optimizer per batch,
    writes the per-sample error lists to CSV files under ``args.save``,
    and returns a stats dict.

    :param train_loader: training data loader; must yield >= 2 batches
    :param model: network taking (image, vector) inputs, returning
        (output, logits_aux)
    :param criterion: loss applied to (sigmoid(output), target)
    :param optimizer: optimizer stepped once per batch
    :param epoch: current epoch index (lr is adjusted by the caller now)
    :param args: parsed CLI args (distributed, local_rank, prof, cutout, ...)
    :return: stats dict from ``get_stats``
    :raises ValueError: if the loader has fewer than 2 batches
    """
    loader_len = len(train_loader)
    if loader_len < 2:
        raise ValueError(
            'train_loader only supports 2 or more batches and loader_len: ' +
            str(loader_len))

    batch_time = AverageMeter()
    data_time = AverageMeter()
    speed = AverageMeter()
    losses = AverageMeter()
    abs_cart_m = AverageMeter()
    abs_angle_m = AverageMeter()
    sigmoid = torch.nn.Sigmoid()

    # switch to train mode
    model.train()
    end = time.time()
    prefetcher = data_prefetcher(train_loader,
                                 cutout=args.cutout,
                                 cutout_length=args.cutout_length)

    cart_error, angle_error = [], []
    input_img, input_vec, target = prefetcher.next()
    # NOTE(review): batch_size is read once from the FIRST batch and reused
    # as the weight for every meter update; a smaller final batch is
    # over-weighted — confirm intended.
    batch_size = input_img.size(0)
    i = -1
    # Only rank 0 owns a progress bar; other ranks keep progbar=None.
    if args.local_rank == 0:
        progbar = tqdm(total=len(train_loader),
                       leave=False,
                       dynamic_ncols=True)
    else:
        progbar = None
    # The prefetcher signals exhaustion by returning None for the image.
    while input_img is not None:
        i += 1
        # scheduler in main now adjusts the lr
        # adjust_learning_rate(optimizer, epoch, i, len(train_loader))

        # Profiling mode: stop after a handful of batches.
        if args.prof:
            if i > 10:
                break
        # measure data loading time
        data_time.update(time.time() - end)

        # compute output
        # note here the term output is equivalent to logits
        output, logits_aux = model(input_img, input_vec)
        # NOTE(review): sigmoid is applied before the loss here but NOT in
        # validate() — confirm this asymmetry is intended.
        output = sigmoid(output)
        loss = criterion(output, target)
        if logits_aux is not None and args.auxiliary:
            logits_aux = sigmoid(logits_aux)
            loss_aux = criterion(logits_aux, target)
            loss += args.auxiliary_weight * loss_aux

        # measure accuracy and record loss
        with torch.no_grad():
            output_np = output.cpu().detach().numpy()
            target_np = target.cpu().detach().numpy()
            batch_abs_cart_distance, batch_abs_angle_distance = accuracy(
                output_np, target_np)
            abs_cart_f, abs_angle_f = np.mean(
                batch_abs_cart_distance), np.mean(batch_abs_angle_distance)
            # Keep every per-sample error so the full distributions can be
            # written to CSV after the loop.
            cart_error.extend(batch_abs_cart_distance)
            angle_error.extend(batch_abs_angle_distance)

        if args.distributed:
            reduced_loss = reduce_tensor(loss.data)
            # NOTE(review): abs_cart_f / abs_angle_f are numpy scalars at
            # this point; reduce_tensor presumably expects a torch tensor —
            # verify this path under args.distributed.
            abs_cart_f = reduce_tensor(abs_cart_f)
            abs_angle_f = reduce_tensor(abs_angle_f)
        else:
            reduced_loss = loss.data

        losses.update(reduced_loss, batch_size)
        abs_cart_m.update(abs_cart_f, batch_size)
        abs_angle_m.update(abs_angle_f, batch_size)

        # compute gradient and do SGD step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # Block until CUDA work finishes so batch_time measures real work.
        torch.cuda.synchronize()
        # measure elapsed time
        batch_time.update(time.time() - end)

        end = time.time()
        input_img, input_vec, target = prefetcher.next()

        if args.local_rank == 0:
            progbar.update()
        # Refresh the textual status line every print_freq batches (rank 0).
        if args.local_rank == 0 and i % args.print_freq == 0 and i > 1:
            speed.update(args.world_size * args.batch_size / batch_time.val)
            progbar.set_description(
                #   'Epoch: [{0}][{1}/{2}]\t'
                'Train (cur/avg)  '
                'batch_t: {batch_time.val:.3f}/{batch_time.avg:.3f}, '
                'img/s: {0:.1f}/{1:.1f}  '
                'load_t: {data_time.val:.3f}/{data_time.avg:.3f}, '
                'loss: {loss.val:.4f}/{loss.avg:.4f}, '
                'cart: {abs_cart.val:.2f}/{abs_cart.avg:.2f}, '
                'angle: {abs_angle.val:.2f}/{abs_angle.avg:.2f}, prog'.format(
                    #    epoch, i, len(train_loader),
                    speed.val,
                    speed.avg,
                    batch_time=batch_time,
                    data_time=data_time,
                    loss=losses,
                    abs_cart=abs_cart_m,
                    abs_angle=abs_angle_m))
    # NOTE(review): this assignment is dead — stats is overwritten by
    # get_stats() below.
    stats = {}
    prefix = 'train_'
    if args.feature_mode != 'rotation_only' and len(
            cart_error
    ) > 0:  # translation_only or all_features: save cartesian csv
        utils.list_to_csv(
            os.path.join(args.save,
                         prefix + args.abs_cart_error_output_csv_name),
            cart_error)
    if args.feature_mode != 'translation_only' and len(
            angle_error) > 0:  # rotation_only or all_features: save angle csv
        utils.list_to_csv(
            os.path.join(args.save,
                         prefix + args.abs_angle_error_output_csv_name),
            angle_error)
    stats = get_stats(progbar, prefix, args, batch_time, data_time, abs_cart_m,
                      abs_angle_m, losses, speed)
    if progbar is not None:
        progbar.close()
        del progbar
    return stats