Example #1
def flower(n_points, petals, petal_length, petal_width, seed):
    # Sample n_points 2D points arranged as a flower: a dense center (label 0)
    # surrounded by petal-shaped clusters (labels 1..petals).
    random.seed(seed)  # plant the flower seed
    flowerpoints = np.zeros([n_points, 2])
    flowerlabels = np.zeros([n_points])
    for p in range(n_points):
        petal = random.randint(0, petals)
        flowerlabels[p] = petal
        # pick the cluster center: the flower's middle for label 0, otherwise a
        # random point along the chosen petal's axis
        if petal == 0:
            center = np.array([.5, .5])
        else:
            angle0 = (petal - 1) / (petals - 1) * 2 * math.pi
            radius0 = petal_length * random.random()
            center = np.array([.5, .5]) + angletocoords2d(angle0, radius0)

        # scatter the point around its center
        angle1 = 2 * math.pi * random.random()
        if petal == 0:
            radius1 = petal_width * random.random() / 2
        else:
            # taper the petal: widest around 3/4 of the petal's length
            radius1 = petal_width * random.random() * (
                .75 - abs(radius0 / petal_length - .75))
        translation = angletocoords2d(angle1, radius1)
        flowerpoints[p, :] = center + translation
    return (torch.tensor(flowerpoints, dtype=torch.float),
            torch.tensor(flowerlabels, dtype=torch.int))
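
Note: every sampler on this page calls a helper angletocoords2d that is not
shown here. Below is a minimal sketch consistent with how it is used (it maps a
polar angle and radius to a 2D Cartesian offset), together with the imports the
snippets assume; the project's actual implementation may differ.

import math
import random

import numpy as np
import torch


def angletocoords2d(angle, radius):
    # Convert polar coordinates (angle in radians, radius) into a 2D offset.
    return np.array([radius * math.cos(angle), radius * math.sin(angle)])
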
Example #2
def grid(n_points, n_lines, seed):
    # Points jittered around an n_lines x n_pointsonaline lattice in [0, 1)^2.
    # Labels stay all zero; if n_points is not divisible by n_lines, the
    # leftover rows remain at the origin.
    random.seed(seed)
    n_pointsonaline = n_points // n_lines
    grid_points = np.zeros([n_points, 2])
    grid_labels = np.zeros([n_points])
    for x in range(n_lines):
        for y in range(n_pointsonaline):
            angle1 = 2 * math.pi * random.random()
            radius1 = random.random() / n_lines

            grid_points[x * n_pointsonaline + y, :] = np.array([
                x / n_lines, y / n_pointsonaline
            ]) + angletocoords2d(angle1, radius1) / n_pointsonaline
    return (torch.tensor(grid_points, dtype=torch.float),
            torch.tensor(grid_labels, dtype=torch.int))
Example #3
def circle(n_points, width, seed=0):
    # Points spread evenly around a circle, each perturbed by a random offset of
    # length up to `width`, then rescaled to sit roughly inside [0, 1]^2.
    random.seed(seed)
    circlepoints = np.zeros([n_points, 2])
    for k in range(n_points):
        angle = 2 * math.pi * k / n_points
        radius = 1
        angle_sway = random.random() * 2 * math.pi
        radius_sway = width * random.random()
        center = angletocoords2d(angle, radius)
        translation = angletocoords2d(angle_sway, radius_sway)
        circlepoints[k, :] = (center + translation) / 2 + 0.5
    return torch.tensor(circlepoints, dtype=torch.float)
Example #4
def simplexflower(n_points, dim, var, seed=0):
    # One Gaussian blob per coordinate axis, centered at 0.5 * e_i, with
    # variance `var` in every direction except 0.1 along the axis itself.
    random.seed(seed)
    np.random.seed(seed)  # the points come from np.random, so seed it as well
    n_points = n_points - n_points % dim
    single_num = n_points // dim
    simflpoints = []
    for i in range(dim):
        mean = np.zeros(dim)
        mean[i] = 0.5
        cov = np.identity(dim) * var
        cov[i, i] = 0.1
        points = np.random.multivariate_normal(mean, cov, single_num)
        simflpoints.append(points)
    simflpoints = np.vstack(simflpoints)
    return torch.tensor(simflpoints, dtype=torch.float)
Example #5
def gaussimplex(n_points, dim, var, seed=0):
    # One isotropic Gaussian blob (variance `var`) centered at each standard
    # basis vector e_i, i.e. at the vertices of the standard simplex.
    random.seed(seed)
    np.random.seed(seed)  # np.random generates the points, so seed it as well
    n_points = n_points - n_points % dim
    single_num = n_points // dim
    simplexpoints = []
    mean = np.zeros(dim)
    cov = np.identity(dim) * var
    for i in range(dim):
        v = np.zeros(dim)
        v[i] = 1
        points = np.random.multivariate_normal(mean, cov, single_num) + v
        simplexpoints.append(points)
    simplexpoints = np.vstack(simplexpoints)
    return torch.tensor(simplexpoints, dtype=torch.float)
Example #6
def gaussflower(n_points, petals, seed=0):
    # `petals` elongated Gaussian blobs centered at distance 3 from the origin,
    # each rotated into its own direction around the circle.
    random.seed(seed)
    np.random.seed(seed)  # the blobs come from np.random, so seed it as well
    n_points = n_points - n_points % petals
    gausspoints = []
    single_num = n_points // petals
    for i in range(petals):
        points = rotate2d(
            2 * math.pi * i / petals,
            np.random.multivariate_normal([3, 0],
                                          [[1, 0], [0, 1 / (10 * petals)]],
                                          single_num))
        gausspoints.append(points)
    gausspoints = np.vstack(gausspoints)
    return torch.tensor(gausspoints, dtype=torch.float)
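
Example #6 also uses a rotate2d helper that is not shown on this page. A minimal
sketch matching its call above (rotate each row of an (n, 2) point array by a
given angle around the origin); the original implementation may differ.

def rotate2d(angle, points):
    # Apply a 2D rotation matrix to every row of `points`.
    rot = np.array([[math.cos(angle), -math.sin(angle)],
                    [math.sin(angle), math.cos(angle)]])
    return points @ rot.T
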
Example #7
def snail(n_points, bend, width, dim, seed=0):
    # A 2D spiral (radius shrinking from 10 toward 0 over `bend` turns) embedded
    # in `dim` dimensions; the other coordinates are filled with uniform noise
    # in [-1, 1] before the final rescaling into [0, 1].
    random.seed(seed)
    np.random.seed(seed)  # np.random fills the extra dimensions, so seed it too
    snailpoints = np.zeros([n_points, dim])
    for k in range(n_points):
        angle = bend * 2 * math.pi * k / n_points
        radius = ((n_points - k) / n_points) * 10
        angle_sway = random.random()
        radius_sway = width * random.random()
        center = np.random.uniform(-1, 1, dim)
        center[:2] = angletocoords2d(angle, radius)
        translation = np.zeros(dim)
        translation[:2] = angletocoords2d(angle_sway, radius_sway)
        snailpoints[k, :] = (center + translation) / 2 + 0.5
    return torch.tensor(snailpoints, dtype=torch.float)
Example #8
def spheresimplex(n_points, dim, rad, seed=0):
    # Points on spheres of radius `rad` centered at each unit vector e_i:
    # standard-normal samples projected onto the unit sphere, scaled, shifted.
    random.seed(seed)
    np.random.seed(seed)  # np.random generates the points, so seed it as well
    n_points = n_points - n_points % dim
    single_num = n_points // dim
    simplexpoints = []
    mean = np.zeros(dim)
    cov = np.identity(dim)
    for i in range(dim):
        v = np.zeros(dim)
        v[i] = 1
        points = np.random.multivariate_normal(mean, cov, single_num)
        norm = np.linalg.norm(points, axis=1)
        points = (points / norm[:, np.newaxis]) * rad + v
        simplexpoints.append(points)
    simplexpoints = np.vstack(simplexpoints)
    return torch.tensor(simplexpoints, dtype=torch.float)
Example #9
def disc(n_points, width, seed=0):
    # Alternate between points on a noisy circle (even k, as in circle()) and
    # points drawn uniformly from inside the disc (odd k), rescaled into [0, 1]^2.
    random.seed(seed)
    discpoints = np.zeros([n_points, 2])
    for k in range(n_points):
        if k % 2 == 0:
            angle = 2 * math.pi * k / n_points
            radius = 1
            angle_sway = random.random() * 2 * math.pi
            radius_sway = width * random.random()
            center = angletocoords2d(angle, radius)
            translation = angletocoords2d(angle_sway, radius_sway)
            discpoints[k, :] = (center + translation) / 2 + 0.5
        else:
            angle = random.random() * 2 * math.pi
            radius = math.sqrt(random.random())
            center = angletocoords2d(angle, radius)
            discpoints[k, :] = center / 2 + 0.5
    return torch.tensor(discpoints, dtype=torch.float)
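
Each sampler returns a torch.float tensor of points (plus integer labels for
flower() and grid()). A quick way to eyeball one of them, assuming matplotlib is
installed (hypothetical usage, not part of the original code):

import matplotlib.pyplot as plt

points, labels = flower(n_points=2000, petals=6, petal_length=0.4,
                        petal_width=0.15, seed=0)
plt.scatter(points[:, 0], points[:, 1], c=labels, s=4)
plt.gca().set_aspect('equal')
plt.show()
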
Example #10
def train(train_loader,
          model,
          optimizer,
          epoch,
          args,
          task,
          metric_meter=None,
          scheduler=None,
          summary_logger=None):
    # calc batch time
    batch_time = utils.AverageMeter()

    # reset metric_meter
    metric_meter.reset()
    steps_per_epoch = len(train_loader)

    # switch to train mode
    model.train()
    end = time.time()

    for step_inbatch, batch in enumerate(train_loader):
        # select model inputs
        inputs = task.select_model_inputs(batch)

        # forward pass
        outputs = model(**inputs)

        # get loss and logits
        loss = outputs[0]
        logits = outputs[1]

        # schedule learning rate
        if scheduler is not None:
            scheduler.step()

        # backward pass
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # get predictions
        if task.logit_to_id and isinstance(logits, torch.Tensor):
            predictions = utils.get_ids_from_logits(logits.clone())
        elif isinstance(logits, torch.Tensor):
            predictions = logits.clone()
        else:
            predictions = logits

        global_step = epoch * steps_per_epoch + step_inbatch
        if global_step % args.print_freq == 0:
            with torch.no_grad():
                batch_time.update((time.time() - end) / args.print_freq)
                end = time.time()

                # update metrics
                # loss still carries grad history here, so detach before .numpy()
                metric_meter.update_scores("loss", {
                    'score': loss.detach().cpu().numpy(),
                    'count': 1
                })
                if isinstance(predictions, torch.Tensor):
                    predictions = predictions.detach().cpu().numpy()
                gathered_dict = {
                    k: v.cpu().numpy() if torch.is_tensor(v) else v
                    for k, v in batch.items()
                }
                gathered_dict['predictions'] = predictions

                metric_meter.update_metrics(gathered_dict)

                average_scores = metric_meter.get_average_scores()
                if args.distributed:
                    average_scores = {
                        k: {
                            'score':
                            reduce_sum_tensor(
                                torch.tensor(v['score'] * v['count'],
                                             device='cuda')).cpu().numpy(),
                            'count':
                            reduce_sum_tensor(
                                torch.tensor(v['count'],
                                             device='cuda')).cpu().numpy()
                        }
                        for k, v in average_scores.items()
                    }
                    average_scores = {
                        k: {
                            'score': v['score'] / v['count'],
                            'count': v['count']
                        }
                        for k, v in average_scores.items()
                    }

                if args.local_rank == 0 or not args.distributed:
                    score_log = summary_logger(average_scores, global_step,
                                               args.task, "train")
                    logging.info(
                        '-----Training----- \nEpoch: [{0}][{1}/{2}]\t'
                        'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                        'Speed {3:.3f} ({4:.3f})\t'.format(
                            epoch,
                            step_inbatch,
                            steps_per_epoch,
                            args.batch_size / batch_time.val,
                            args.batch_size / batch_time.avg,
                            batch_time=batch_time) + score_log)
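
The train() loop above and the validate() loop in the next example rely on
utils.AverageMeter and reduce_sum_tensor, neither of which appears on this page,
and additionally assume time, logging, and torch are imported. Minimal sketches
of the two helpers, consistent with how they are called (assumptions about the
project's utilities, not its actual code):

import time
import logging

import torch
import torch.distributed as dist


class AverageMeter:
    # Track the latest value and the running average of a quantity.
    def __init__(self):
        self.val = 0.0
        self.sum = 0.0
        self.count = 0
        self.avg = 0.0

    def update(self, val, n=1):
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count


def reduce_sum_tensor(tensor):
    # Sum a tensor across all distributed workers in place and return it.
    dist.all_reduce(tensor, op=dist.ReduceOp.SUM)
    return tensor
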
Example #11
def validate(eval_loader, model, epoch, args, task, metric_meter):
    batch_time = utils.AverageMeter()

    # reset metric_meter
    metric_meter.reset()

    # switch to evaluate mode
    model.eval()

    with torch.no_grad():
        end = time.time()

        for step_inbatch, batch in enumerate(eval_loader):
            # select model inputs
            inputs = task.select_model_inputs(batch)
            # forward pass
            outputs = model(**inputs)

            # get loss and logits
            loss = outputs[0]
            logits = outputs[1]

            # get predictions
            if task.logit_to_id and isinstance(logits, torch.Tensor):
                predictions = utils.get_ids_from_logits(logits.clone())
            elif isinstance(logits, torch.Tensor):
                predictions = logits.clone()
            else:
                predictions = logits

            # update metrics
            metric_meter.update_scores("loss", {
                'score': loss.cpu().numpy(),
                'count': 1
            })
            if isinstance(predictions, torch.Tensor):
                predictions = predictions.cpu().numpy()
            gathered_dict = {
                k: v.cpu().numpy() if torch.is_tensor(v) else v
                for k, v in batch.items()
            }
            gathered_dict['predictions'] = predictions
            metric_meter.update_metrics(gathered_dict)

            # reduce average scores
            average_scores = metric_meter.get_average_scores()
            if args.distributed:
                average_scores = {
                    k: {
                        'score':
                        reduce_sum_tensor(
                            torch.tensor(v['score'] * v['count'],
                                         device='cuda')).cpu().numpy(),
                        'count':
                        reduce_sum_tensor(
                            torch.tensor(v['count'],
                                         device='cuda')).cpu().numpy()
                    }
                    for k, v in average_scores.items()
                }
                average_scores = {
                    k: {
                        'score': v['score'] / v['count'],
                        'count': v['count']
                    }
                    for k, v in average_scores.items()
                }

            if step_inbatch % args.print_freq == 0:
                batch_time.update((time.time() - end) / args.print_freq)
                end = time.time()

                if args.local_rank == 0 or not args.distributed:
                    score_log = metric_meter.get_score_str(
                        "eval", average_scores=average_scores)

                    logging.info(
                        '-----Evaluation----- \nEpoch: [{0}][{1}/{2}]\t'
                        'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                        'Speed {3:.3f} ({4:.3f})\t'.format(
                            epoch,
                            step_inbatch,
                            len(eval_loader),
                            args.batch_size / batch_time.val,
                            args.batch_size / batch_time.avg,
                            batch_time=batch_time) + score_log)

    # reduce final scores
    average_scores = metric_meter.get_average_scores()
    if args.distributed:
        average_scores = {
            k: {
                'score':
                reduce_sum_tensor(
                    torch.tensor(v['score'] * v['count'],
                                 device='cuda')).cpu().numpy(),
                'count':
                reduce_sum_tensor(torch.tensor(v['count'],
                                               device='cuda')).cpu().numpy()
            }
            for k, v in average_scores.items()
        }
        average_scores = {
            k: {
                'score': v['score'] / v['count'],
                'count': v['count']
            }
            for k, v in average_scores.items()
        }

    if args.local_rank == 0 or not args.distributed:
        metric_meter.reset()
        metric_meter.set_average_scores(average_scores)
        score_log = metric_meter.get_score_str("eval",
                                               average_scores=average_scores)
        logging.info('-----Evaluation-----\n' + score_log)

    return metric_meter