Example #1
0
 def basename(self):
     """Return the tail of ``self.func`` after a '>' delimiter.

     Scans the indices produced by ``utils.to_zero`` and, at the first
     '>' encountered in that order, keeps everything after it.  If no
     '>' is present the whole string is returned unchanged.
     """
     start = 0
     for idx in utils.to_zero(len(self.func)):
         if self.func[idx] != '>':
             continue
         start = idx + 1
         break
     return self.func[start:]
Example #2
0
def _parse_func(decl):
    """Parse a C-style function declaration into its components.

    Returns ``(func, types, args)`` where ``func`` is everything before
    the argument list and ``types``/``args`` are parallel lists produced
    by ``_parse_typed_arg`` for each comma-separated argument.  Returns
    ``(None, None, None)`` when no argument list can be located
    (e.g., the declaration is just '0' in ext4).
    """
    beg_args = None
    depth = 0
    # Locate the '(' that opens the outermost argument list.  The scan
    # order comes from utils.to_zero — presumably from the last index
    # down to 0, so each ')' is seen before its matching '(' and depth
    # returns to 0 exactly at the outer '(' — TODO confirm.
    for i in utils.to_zero(len(decl)):
        if decl[i] == ')':
            depth += 1
        elif decl[i] == '(':
            depth -= 1
        if depth == 0 and decl[i] == '(':
            beg_args = i
            break

    # not parsable (e.g., '0' in ext4)
    if beg_args is None:
        return (None, None, None)

    # normal path: split "a, b, c" between the outer parentheses.
    arg_str = decl[beg_args+1:-1]
    targs = [_parse_typed_arg(part.strip()) for part in arg_str.split(",")]

    # BUG FIX: the original built `types` and `args` as two lazy `map`
    # objects over the same one-shot `targs` map iterator; under Python 3
    # consuming either one exhausted the shared iterator, leaving the
    # other empty.  Materialize everything as lists instead.
    types = [ty for ty, _ in targs]
    args = [arg for _, arg in targs]
    func = decl[:beg_args]

    return (func, types, args)
Example #3
0
 def move(self, colliders):
     """Advance the ball by its velocity, bouncing off axis-aligned colliders.

     Repeatedly consumes the remaining per-frame velocity: if the path from
     the current center toward the target position hits a collider, the ball
     travels up to the bounce point, the velocity component perpendicular to
     the collider is reflected, and the loop continues with the leftover
     velocity; otherwise the ball moves freely and the loop ends.

     Each collider is a ``(point1, point2)`` segment and must be perfectly
     vertical or horizontal; anything else raises ValueError.  ``self.mod``
     scales every applied displacement and grows by 0.1 after each bounce —
     presumably a per-bounce speed-up factor; confirm against the game loop.
     """
     rem_vel_x = self.velocity_x
     rem_vel_y = self.velocity_y
     while rem_vel_x or rem_vel_y:  # while the ball can still move
         target_pos = Vector(rem_vel_x, rem_vel_y) + self.center
         for collider in colliders:
             point1, point2 = collider
             # Intersection of the center->target path with this segment's
             # supporting line (None when they never meet at this angle).
             bounce_pos = utils.find_intersection(*self.center, *target_pos, *point1, *point2)
             if bounce_pos is None:
                 continue  # Will never collide unless angle changes
             # Stop self.radius short of the line so the ball's edge,
             # not its center, makes contact.
             distance_bounce = utils.distance(*self.center, *bounce_pos) - self.radius
             distance_target = utils.distance(*self.center, *target_pos)
             if distance_bounce > distance_target:
                 continue  # Did not collide yet
             if not utils.is_between(*collider, bounce_pos):
                 continue # Moves past collider
             break
         else:  # Did not collide with any collider -> free to move
             self.center_x += rem_vel_x * self.mod
             self.center_y += rem_vel_y * self.mod
             break
         # Displacement toward the bounce point; utils.to_zero here takes
         # (offset, velocity) — presumably clamping the offset toward zero
         # or to the velocity's magnitude/sign — TODO confirm its contract.
         dist_x = utils.to_zero(bounce_pos[0] - self.center_x, rem_vel_x)
         dist_y = utils.to_zero(bounce_pos[1] - self.center_y, rem_vel_y)
         rem_vel_x -= dist_x
         rem_vel_y -= dist_y
         # Reflect the component perpendicular to the collider; the applied
         # displacement for that axis is mirrored as well.
         if collider[0][0] == collider[1][0]:  # collider is vertical
             dist_x = -dist_x
             rem_vel_x = -rem_vel_x
             self.velocity_x = -self.velocity_x
         elif collider[0][1] == collider[1][1]:  # collider is horizontal
             dist_y = -dist_y
             rem_vel_y = -rem_vel_y
             self.velocity_y = -self.velocity_y
         else:
             raise ValueError("Collider", collider, "has to be a straight line")
         self.center_x += dist_x * self.mod
         self.center_y += dist_y * self.mod
         self.mod += .1
Example #4
0
def _parse_typed_arg(targ):
    """Split one argument declaration into ``(type, name)``.

    A space separates the type from the name (e.g., ``"int x"`` ->
    ``("int", "x")``); with no space the argument is untyped (e.g.,
    ``foo(x)``) and ``(None, targ)`` is returned.  The split index comes
    from the first space found in ``utils.to_zero`` scan order —
    presumably last-index-first, so multi-word types like
    ``"unsigned int x"`` split at the last space — TODO confirm.
    """
    end_type = None
    for i in utils.to_zero(len(targ)):
        if targ[i] == ' ':
            end_type = i
            break

    # no typed arg (e.g., foo(x))
    # BUG FIX: the original tested `i == 0`, a fragile sentinel relying on
    # the loop leaving i at 0 when no space is found; it also misfired by
    # treating a space at index 0 as "untyped".  Test the result directly.
    if end_type is None:
        return (None, targ)

    # typed arg (e.g., foo(int x))
    ty = targ[0:end_type].strip()
    arg = targ[end_type+1:].strip()
    return (ty, arg)
Example #5
0
def main(args):
    """Train an embedding network with learnable per-class cluster centers.

    Builds a fresh model from an ImageNet-pretrained checkpoint (or resumes
    one from ``args.r``), initializes cluster centers either by clustering
    extracted features or randomly, then trains with Adam, checkpointing
    every ``args.save_step`` epochs and finally saving per-epoch
    loss/distance curves to ``result.npz`` in the log directory.

    NOTE(review): written against legacy PyTorch (<0.4) — ``Variable``,
    ``loss.data[0]`` — confirm the installed torch version matches.
    """
    # Classes per dataset; presumably CUB-200 (100-class train half) and
    # Cars-196 (98-class train half) — verify against the DataSet loaders.
    num_class_dict = {'cub': int(100), 'car': int(98)}
    #  Save the training logs under the checkpoint directory.
    log_dir = os.path.join(args.checkpoints, args.log_dir)
    mkdir_if_missing(log_dir)

    # logging.Logger appears to be a project-local stdout tee (console +
    # log.txt), not the stdlib logging module — confirm.
    sys.stdout = logging.Logger(os.path.join(log_dir, 'log.txt'))
    display(args)

    if args.r is None:
        # Fresh model: copy matching pretrained weights into its state dict.
        model = models.create(args.net, Embed_dim=args.dim)
        # load part of the model
        model_dict = model.state_dict()
        # print(model_dict)
        if args.net == 'bn':
            pretrained_dict = torch.load('pretrained_models/bn_inception-239d2248.pth')
        else:
            pretrained_dict = torch.load('pretrained_models/inception_v3_google-1a9a5a14.pth')

        # Keep only the keys the current architecture actually has.
        pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict}

        model_dict.update(pretrained_dict)

        # orth init: replace the embedding FC weight with orthogonal rows.
        if args.init == 'orth':
            print('initialize the FC layer orthogonally')
            _, _, v = torch.svd(model_dict['Embed.linear.weight'])
            model_dict['Embed.linear.weight'] = v.t()

        # zero bias
        model_dict['Embed.linear.bias'] = torch.zeros(args.dim)

        model.load_state_dict(model_dict)
    else:
        # resume model from a previously saved checkpoint path
        model = torch.load(args.r)

    model = model.cuda()

    # compute the cluster centers for each class here

    def normalize(x):
        # L2-normalize each row of x (unit-norm embeddings/centers).
        norm = x.norm(dim=1, p=2, keepdim=True)
        x = x.div(norm.expand_as(x))
        return x

    data = DataSet.create(args.data, root=None, test=False)

    if args.center_init == 'cluster':
        # Initialize centers by clustering features of the training set.
        data_loader = torch.utils.data.DataLoader(
            data.train, batch_size=args.BatchSize, shuffle=False, drop_last=False)

        features, labels = extract_features(model, data_loader, print_freq=32, metric=None)
        features = [feature.resize_(1, args.dim) for feature in features]
        features = torch.cat(features)
        features = features.numpy()
        labels = np.array(labels)

        centers, center_labels = cluster_(features, labels, n_clusters=args.n_cluster)
        center_labels = [int(center_label) for center_label in center_labels]

        # Centers are trainable (requires_grad=True); labels are fixed.
        centers = Variable(torch.FloatTensor(centers).cuda(),  requires_grad=True)
        center_labels = Variable(torch.LongTensor(center_labels)).cuda()
        print(40*'#', '\n Clustering Done')

    else:
        # Random init: n_cluster centers per class; labels repeat 0..C-1
        # n_cluster times to stay aligned with the centers tensor rows.
        center_labels = int(args.n_cluster) * list(range(num_class_dict[args.data]))
        center_labels = Variable(torch.LongTensor(center_labels).cuda())

        centers = normalize(torch.rand(num_class_dict[args.data]*args.n_cluster, args.dim))
        centers = Variable(centers.cuda(), requires_grad=True)

    torch.save(model, os.path.join(log_dir, 'model.pkl'))
    print('initial model is save at %s' % log_dir)

    # fine tune the model: the learning rate for pre-trained parameter is 1/10
    new_param_ids = set(map(id, model.Embed.parameters()))

    new_params = [p for p in model.parameters() if
                  id(p) in new_param_ids]

    base_params = [p for p in model.parameters() if
                   id(p) not in new_param_ids]
    param_groups = [
                {'params': base_params, 'lr_mult': 0.1},
                {'params': new_params, 'lr_mult': 1.0},
                {'params': centers, 'lr_mult': 1.0}]

    optimizer = torch.optim.Adam(param_groups, lr=args.lr,
                                 weight_decay=args.weight_decay)

    # Counts samples per (class, cluster) cell; shared with the loss,
    # which presumably updates it in place — re-zeroed every epoch below.
    cluster_counter = np.zeros([num_class_dict[args.data], args.n_cluster])
    criterion = losses.create(args.loss, alpha=args.alpha, centers=centers,
                              center_labels=center_labels, cluster_counter=cluster_counter).cuda()

    # random sampling to generate mini-batch
    train_loader = torch.utils.data.DataLoader(
        data.train, batch_size=args.BatchSize, shuffle=True, drop_last=False)

    # save the train information
    epoch_list = list()
    loss_list = list()
    pos_list = list()
    neg_list = list()

    # _mask = Variable(torch.ByteTensor(np.ones([2, 4]))).cuda()
    # All-ones mask: every (class, cluster) pair starts active.
    dtype = torch.ByteTensor
    _mask = torch.ones(int(num_class_dict[args.data]), args.n_cluster).type(dtype)
    _mask = Variable(_mask).cuda()

    for epoch in range(args.start, args.epochs):
        epoch_list.append(epoch)

        running_loss = 0.0
        running_pos = 0.0
        running_neg = 0.0
        # to_zero presumably resets the counter array in place — confirm.
        to_zero(cluster_counter)

        # NOTE(review): the loop variable `data` shadows the DataSet bound
        # above; harmless only because the dataset object is not referenced
        # again inside the epoch loop.
        for i, data in enumerate(train_loader, 0):
            inputs, labels = data
            # wrap them in Variable
            inputs = Variable(inputs.cuda())

            # type of labels is Variable cuda.Longtensor
            labels = Variable(labels).cuda()
            optimizer.zero_grad()
            # centers.zero_grad()
            embed_feat = model(inputs)

            # update network weight
            loss, inter_, dist_ap, dist_an = criterion(embed_feat, labels, _mask)
            loss.backward()
            optimizer.step()

            # Re-project the centers onto the unit sphere after each step.
            centers.data = normalize(centers.data)

            running_loss += loss.data[0]
            running_neg += dist_an
            running_pos += dist_ap

            if epoch == 0 and i == 0:
                print(50 * '#')
                print('Train Begin -- HA-HA-HA')
            if i % 10 == 9:
                print('[Epoch %05d Iteration %2d]\t Loss: %.3f \t Accuracy: %.3f \t Pos-Dist: %.3f \t Neg-Dist: %.3f'
                      % (epoch + 1,  i+1, loss.data[0], inter_, dist_ap, dist_an))
        # cluster number counter show here
        print(cluster_counter)
        loss_list.append(running_loss)
        # NOTE(review): divides by the final batch index i (= num_batches-1),
        # not the batch count — off by one as an average; confirm intent.
        pos_list.append(running_pos / i)
        neg_list.append(running_neg / i)
        # update the _mask to make the cluster with only 1 or no member to be silent
        # _mask = Variable(torch.FloatTensor(cluster_counter) > 1).cuda()
        # cluster_distribution = torch.sum(_mask, 1).cpu().data.numpy().tolist()
        # print(cluster_distribution)
        # print('[Epoch %05d]\t Loss: %.3f \t Accuracy: %.3f \t Pos-Dist: %.3f \t Neg-Dist: %.3f'
        #       % (epoch + 1, running_loss, inter_, dist_ap, dist_an))

        if epoch % args.save_step == 0:
            torch.save(model, os.path.join(log_dir, '%d_model.pkl' % epoch))
    np.savez(os.path.join(log_dir, "result.npz"), epoch=epoch_list, loss=loss_list, pos=pos_list, neg=neg_list)