Example #1
	def simulate(self):
		"""
		Run the simulation
		"""

		log.info('Starting simulation')

		games_folder = os.path.join(os.getcwd(), 'Human_Games')

		wins = 0
		losses = 0
		num_questions = 0
		avg_win = 0   # running sum of per-game win averages
		avg_lose = 0  # running sum of per-game lose averages
		questions_asked = {}
		question_answers = {}

		for number in range(16, 31):
			game = Game(number)

			game_wins, game_losses, game_num_questions, game_win_avg, game_lose_avg, game_answers, game_questions = game.play()

			questions_asked[game.id] = game_questions  # accumulated across games, keyed by game id
			question_answers = game_answers  # replaced each iteration with the latest game's answers

			wins += game_wins
			losses += game_losses
			num_questions += game_num_questions
			avg_win += game_win_avg
			avg_lose += game_lose_avg

			models.build(game, 3, questions_asked, question_answers)
Example #2
	def setup(self):
		"""
		Perform optional pre-simulation tasks
		"""

		log.info('Performing setup')
		db.cursor.execute('DELETE FROM Pqd')
		db.connection.commit()
		questions.copy_into_answers()
		questions.build_pqd()
		models.build(Game(15), 1)
		for number in range(16, 31):
			models.evaluation_1(Game(number))
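
A minimal usage sketch for the two methods above, assuming they live on a
hypothetical Simulation class that owns the db, questions, and models
helpers:

    sim = Simulation()  # hypothetical owner of setup() and simulate()
    sim.setup()         # clear Pqd, rebuild question data, evaluate games 16-30
    sim.simulate()      # play games 16-30, rebuilding models after each game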
Example #3
def main(data_args=None,
         optimizer_args=None,
         model_args=None,
         loss_args=None,
         train_args=None):
    '''Main function for continuous BGAN.'''

    print_section('LOADING DATA')  ###########################################
    train_stream, training_samples, shape, viz_options = load_stream(
        **data_args)
    train_args['training_samples'] = training_samples
    setup_viz(**viz_options)
    model_args.update(**shape)

    print_section('MODEL')  ##################################################
    noise_var = T.matrix('noise')
    input_var = T.tensor4('inputs')

    if loss_args['loss'] == 'bgan':
        log_Z = theano.shared(lasagne.utils.floatX(0.), name='log_Z')
        loss_args['loss_options']['log_Z'] = log_Z
    else:
        log_Z = None

    logger.info('Building model and compiling GAN functions...')
    logger.info('Model args: {}'.format(model_args))
    generator, discriminator = build(noise_var, input_var, **model_args)

    real_out = lasagne.layers.get_output(discriminator)
    fake_out = lasagne.layers.get_output(discriminator,
                                         lasagne.layers.get_output(generator))

    g_results, d_results = get_losses(real_out,
                                      fake_out,
                                      optimizer_args=optimizer_args,
                                      **loss_args)

    if log_Z is not None:
        log_Z_est = est_log_Z(fake_out)
        g_results.update(**{'log Z': log_Z, 'log Z (est)': log_Z_est.mean()})

    print_section('OPTIMIZER')  ##############################################
    train_d, train_g, gen = setup(input_var, noise_var, log_Z, generator,
                                  discriminator, g_results, d_results,
                                  **optimizer_args)

    print_section('TRAIN')  ##################################################
    try:
        train(train_d, train_g, gen, train_stream, **train_args)
    except KeyboardInterrupt:
        logger.info('Training interrupted')
        print_section('DONE')  ###############################################
        exit(0)
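
A hedged invocation sketch: main() takes five keyword-argument dicts. Only
loss_args['loss'] and loss_args['loss_options'] are read directly above;
every other key shown here is an assumption forwarded to load_stream,
setup, and train:

    main(data_args={'source': 'mnist.hdf5'},      # assumed load_stream option
         optimizer_args={'learning_rate': 1e-4},  # assumed setup option
         model_args={},                           # filled in from data shape
         loss_args={'loss': 'bgan', 'loss_options': {}},
         train_args={'epochs': 100})              # assumed train option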
Example #4
def generate_models():

    gen = sbcnn_generator()
    data = {
        'model_path': [],
        'gen_path': [],
        'id': [],
    }
    for out in gen:
        model = None

        try:
            params, settings = out
            model = models.build(settings.copy())
        except ValueError as e:
            print('Error:', e)
            continue

        # Store parameters
        for k, v in params.items():
            data.setdefault(k, []).append(v)

        model_id = str(uuid.uuid4())
        out_dir = os.path.join('scan', model_id)
        os.makedirs(out_dir)

        model_path = os.path.join(out_dir, 'model.orig.hdf5')
        out_path = os.path.join(out_dir, 'gen')

        # Store model
        model.save(model_path)
        stats = stm32convert.generatecode(model_path,
                                          out_path,
                                          name='network',
                                          model_type='keras',
                                          compression=None)

        # Store model info
        data['model_path'].append(model_path)
        data['gen_path'].append(out_path)
        data['id'].append(model_id)

        for k, v in stats.items():
            data.setdefault(k, []).append(v)

    df = pandas.DataFrame(data)
    return df
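
A usage sketch under the assumptions above (an importable sbcnn_generator
and a writable scan/ directory); the CSV path is illustrative:

    df = generate_models()
    df.to_csv(os.path.join('scan', 'results.csv'), index=False)
    print(df[['id', 'model_path', 'gen_path']].head())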
Example #5
def build_deformable_detr(panoptic=False, num_classes=91, **kwargs):
    args = Namespace()
    args.dataset_file = 'coco_panoptic' if panoptic else 'coco'
    args.device = kwargs.get('device', 'cuda')
    args.num_classes = num_classes
    args.num_feature_levels = kwargs.get('feature_levels', 4)
    args.aux_loss = kwargs.get('aux_loss', True)
    args.with_box_refine = kwargs.get('with_box_refine', False)
    args.masks = kwargs.get('masks', False)
    args.mask_loss_coef = kwargs.get('mask_loss_coef', 1.0)
    args.dice_loss_coef = kwargs.get('dice_loss_coef', 1.0)
    args.cls_loss_coef = kwargs.get('cls_loss_coef', 2.0)
    args.bbox_loss_coef = kwargs.get('bbox_loss_coef', 5.0)
    args.giou_loss_coef = kwargs.get('giou_loss_coef', 2.0)
    args.focal_alpha = kwargs.get('focal_alpha', 0.25)
    args.frozen_weights = kwargs.get('frozen_weights', None)

    # backbone
    args.backbone = kwargs.get('backbone', 'resnet50')
    args.lr_backbone = kwargs.get('lr_backbone', 2e-5)
    args.dilation = kwargs.get('dilation', False)

    # positional encoding
    args.position_embedding = kwargs.get('position_embedding', 'sine')  # or 'learned'
    args.hidden_dim = kwargs.get('hidden_dim', 256)
    
    # transformer
    args.nheads = kwargs.get('nheads', 8)
    args.dim_feedforward = kwargs.get('dim_feedforward', 1024)
    args.enc_layers = kwargs.get('enc_layers', 6)
    args.dec_layers = kwargs.get('dec_layers', 6)
    args.dropout = kwargs.get('dropout', 0.1)
    args.dec_n_points = kwargs.get('dec_n_points', 4)
    args.enc_n_points = kwargs.get('enc_n_points', 4)
    args.num_queries = kwargs.get('num_queries', 300)
    args.two_stage = kwargs.get('two_stage', False)

    # loss
    args.set_cost_class = kwargs.get('set_cost_class', 2)
    args.set_cost_bbox = kwargs.get('set_cost_bbox', 5)
    args.set_cost_giou = kwargs.get('set_cost_giou', 2)

    model, criterion, postprocessors = build(args)
    model.to(args.device)

    if kwargs.get('return_postprocessors', False):
        return model, postprocessors
    return model
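
A minimal usage sketch; num_classes=250 (the panoptic label space) and the
CPU device are illustrative choices, not requirements of the builder:

    model, postprocessors = build_deformable_detr(
        panoptic=True,
        num_classes=250,
        device='cpu',
        with_box_refine=True,
        return_postprocessors=True)
    model.eval()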
Example #6
    def __init__(self, config_path, disable_tqdm=False):
        self.config = load_yaml.load(config_path)
        self.disable_tqdm = disable_tqdm

        misc.seeds.set_seeds(self.config['seed'], self.config['deterministic'])
        self.amp = self.config['amp']
        if self.amp:
            self.scaler = torch.cuda.amp.GradScaler()
        self.train_dataloader = datasets.build(
            'train', self.config['train_dataset_args'])
        self.val_dataloader = datasets.build('val',
                                             self.config['val_dataset_args'])

        self.device = torch.device('cuda')
        self.model = models.build(**self.config)
        self.model = self.model.to(self.device)
        logger.info(self.model)

        self.criterion = criteria.build(**self.config)
        self.criterion = self.criterion.to(self.device)

        self.optimizer = optimizers.build(self.model.parameters(),
                                          **self.config)
        self.scheduler = schedulers.build(self.optimizer, **self.config)

        self.save_dir = self.config['save_dir']
        os.makedirs(self.save_dir, exist_ok=True)

        log_keys = [
            'epoch', 'train_loss', 'train_iou', 'train_recall',
            'train_precision', 'train_f1', 'val_loss', 'val_iou', 'val_recall',
            'val_precision', 'val_f1'
        ]
        self.log = misc.log.TrainLog(
            self.save_dir,
            log_keys,
            save_keys=['val_loss', 'val_iou', 'val_f1'],
            save_modes=['min', 'max', 'max'],
        )

        self.start_epoch = 1
        self.final_epoch = self.config['epochs']
        if self.config['load_checkpoint']:
            self.__load_checkpoint()
        shutil.copy(config_path, os.path.join(self.save_dir, 'config.yml'))
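
A hedged construction sketch, assuming the __init__ above belongs to a
hypothetical Trainer class and that the YAML config supplies the keys it
reads (seed, deterministic, amp, dataset args, save_dir, epochs, ...):

    trainer = Trainer('configs/config.yml', disable_tqdm=True)  # hypothetical class name
    # the training loop over [start_epoch, final_epoch] is driven elsewhere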
Example #7
    def __init__(self, config_path, disable_tqdm=False):
        self.config = load_yaml.load(config_path)
        self.disable_tqdm = disable_tqdm

        misc.seeds.set_seeds(self.config['seed'], self.config['deterministic'])

        self.amp = self.config['amp']
        self.dataloader = datasets.build('test', self.config['dataset_args'])

        # enable test-time augmentation when more than one transform is configured
        self.tta = len(self.config['dataset_args']['transforms_args']) > 1
        if self.tta:
            self.transforms = transforms.tta.TTA(
                self.config['dataset_args']['transforms_args'])

        self.device = torch.device('cuda')
        self.model = models.build(**self.config)
        self.model = self.model.to(self.device)
        self.softmax = torch.nn.Softmax(dim=1).to(self.device)

        self.classes = self.config['model_args']['classes']

        self.save_dir = self.config['save_dir']
        os.makedirs(self.save_dir, exist_ok=True)
Example #8
def main():

    import models

    assert not argv.orbiham, "it's called orbigraph now"

    if argv.find_ideals:
        find_ideals()
        return

    Gx, Gz, Hx, Hz = models.build()

    if argv.chainmap:
        do_chainmap(Gx, Gz)

    if argv.symmetry:
        do_symmetry(Gx, Gz, Hx, Hz)
        return
    
    #print shortstrx(Gx, Gz)
    if argv.report:
        print("Hz:")
        for i, h in enumerate(Hz):
            print(i, shortstr(h), h.sum())
    #print shortstr(find_stabilizers(Gx, Gz))

    Lz = find_logops(Gx, Hz)
    Lx = find_logops(Gz, Hx)
    #print "Lz:", shortstr(Lz)

    if Lz.shape[0]*Lz.shape[1]:
        print(Lz.shape, Gx.shape)
        check_commute(Lz, Gx)
        check_commute(Lz, Hx)

    Px = get_reductor(Hx)  # projector onto complement of rowspan of Hx
    Pz = get_reductor(Hz)  # likewise for Hz

    Rz = [dot2(Pz, g) for g in Gz]
    Rz = array2(Rz)
    Rz = row_reduce(Rz, truncate=True)
    rz = len(Rz)

    n = Gx.shape[1]
    print("n =", n)
    if len(Lx):
        print("Lx Lz:")
        print(shortstrx(Lx, Lz))
    print("Hx:", len(Hx), "Hz:", len(Hz))
    print("Gx:", len(Gx), "Gz:", len(Gz))

    Rx = [dot2(Px, g) for g in Gx]
    Rx = array2(Rx)

    Rx = row_reduce(Rx, truncate=True)
    rx = len(Rx)
    print("Rx:", rx, "Rz:", rz)
    if argv.show:
        print(shortstrx(Rx, Rz))

    Qx = u_inverse(Rx)
    Pxt = Px.transpose()
    assert eq2(dot2(Rx, Qx), identity2(rx))
    assert eq2(dot2(Rx, Pxt), Rx)

    #print shortstr(dot2(Pxt, Qx))
    PxtQx = dot2(Pxt, Qx)
    lines = [shortstr(dot2(g, PxtQx)) for g in Gx]
    lines.sort()
    #print "PxtQx:"
    #for s in lines:
    #    print s
    #print "RzRxt"
    #print shortstr(dot2(Rz, Rx.transpose()))

    offset = argv.offset

    if len(Hz):
        Tx = find_errors(Hz, Lz, Rz)
    else:
        Tx = zeros2(0, n)

    if argv.dense:
        dense(**locals())
        return

    if argv.dense_full:
        dense_full(**locals())
        return

    if argv.show_delta:
        show_delta(**locals())
        return

    if argv.slepc:
        slepc(**locals())
        return

#    if argv.orbigraph:
#        from linear import orbigraph
#        orbigraph(**locals())
#        return

    v0 = None

#    excite = argv.excite
#    if excite is not None:
#        v0 = zeros2(n)
#        v0[excite] = 1

    verts = []
    lookup = {}
    for i, v in enumerate(span(Rx)): # XXX does not scale well
        if v0 is not None:
            v = (v+v0)%2
            v = dot2(Px, v)
        lookup[v.tobytes()] = i
        verts.append(v)
    print("span:", len(verts))
    assert len(lookup) == len(verts)

    mz = len(Gz)
    n = len(verts)  # note: rebinds the earlier n = Gx.shape[1]

    if argv.lie:
        U = []
        for i, v in enumerate(verts):
            count = dot2(Gz, v).sum()
            Pxv = dot2(Px, v)
            assert count == dot2(Gz, Pxv).sum()
            U.append(mz - 2*count)
        uniq = list(set(U))
        uniq.sort(reverse=True)
        s = ', '.join("%d(%d)"%(val, U.count(val)) for val in uniq)
        print(s)
        print("sum:", sum(U))
        return
        

    if n <= 1024 and argv.solve:
        H = numpy.zeros((n, n))
        syndromes = []
        for i, v in enumerate(verts):
            syndromes.append(dot2(Gz, v))
            count = dot2(Gz, v).sum()
            Pxv = dot2(Px, v)
            assert count == dot2(Gz, Pxv).sum()
            H[i, i] = mz - 2*count
            for g in Gx:
                v1 = (g+v)%2
                v1 = dot2(Px, v1)
                j = lookup[v1.tobytes()]
                H[i, j] += 1
    
        if argv.showham:
            s = lstr2(H, 0).replace(',  ', ' ')
            s = s.replace(' 0', ' .')
            s = s.replace(', -', '-')
            print(s)
    
        vals, vecs = numpy.linalg.eigh(H)
        show_eigs(vals)

        if argv.show_partition:
            beta = argv.get("beta", 1.0)
            show_partition(vals, beta)

        if argv.orbigraph:
            if argv.symplectic:
                H1 = build_orbigraph(H, syndromes)
            else:
                H1 = build_orbigraph(H)
            print("orbigraph:")
            print(H1)
            vals, vecs = numpy.linalg.eig(H1)
            show_eigs(vals)

    elif argv.sparse:
        print("building H", end=' ')
        A = {} # adjacency
        U = [] # potential

        if offset is None:
            offset = mz + 1 # make H positive definite

        for i, v in enumerate(verts):
            if i % 1000 == 0:
                write('.')
            count = dot2(Gz, v).sum()
            #H[i, i] = mz - 2*count
            U.append(offset + mz - 2*count)
            for g in Gx:
                v1 = (g+v)%2
                v1 = dot2(Px, v1)
                j = lookup[v1.tobytes()]
                A[i, j] = A.get((i, j), 0) + 1
    
        print("\nnnz:", len(A))

        if argv.lanczos:
            vals, vecs = do_lanczos(A, U)

        elif argv.orbigraph:
            vals, vecs = do_orbigraph(A, U)

        else:
            return

        vals -= offset # offset doesn't change vecs

        show_eigs(vals)

    elif argv.orbigraph:

        assert n<=1024

        H = numpy.zeros((n, n))
        syndromes = []
        for i, v in enumerate(verts):
            syndromes.append(dot2(Gz, v))
            count = dot2(Gz, v).sum()
            Pxv = dot2(Px, v)
            assert count == dot2(Gz, Pxv).sum()
            H[i, i] = mz - 2*count
            for g in Gx:
                v1 = (g+v)%2
                v1 = dot2(Px, v1)
                j = lookup[v1.tobytes()]
                H[i, j] += 1
    
        if argv.showham:
            s = lstr2(H, 0).replace(',  ', ' ')
            s = s.replace(' 0', ' .')
            s = s.replace(', -', '-')
            print(s)
    
        if argv.symplectic:
            H1 = build_orbigraph(H, syndromes)
        else:
            H1 = build_orbigraph(H)
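
A hedged invocation sketch; the custom argv helper above exposes bare
command-line tokens as attributes (and key=value pairs via argv.get), so
runs presumably look like the following, with the script name being a
placeholder:

    # python main.py solve showham     -> dense eigensolve, print H
    # python main.py sparse lanczos    -> sparse Lanczos eigensolve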
Example #9
def visualize_segmentation(out, im):
    pano_img = np.array(out["panoptic"][1][0]).astype(float)
    pano_img *= 255. / pano_img.max()
    og_img = np.array(im)
    pano_img = pano_img.astype(np.uint8)
    seg_img = make_seg_img(out)
    summed = (0.7 * seg_img + 0.3 * pano_img).astype(np.uint8)
    # seg_img = (og_img * 0.1 + seg_img * 0.9).astype(np.uint8)
    output = np.hstack([og_img, pano_img, seg_img, summed])
    cv2.imshow("seg_img", output[:, :, ::-1])
    cv2.waitKey(0)


if __name__ == "__main__":
    args = parser.parse_args()
    detr, criterion, postprocessors = build(args)
    state_dict = torch.hub.load_state_dict_from_url(
        url='https://dl.fbaipublicfiles.com/detr/detr-r50-panoptic-00ce5173.pth',
        map_location='cpu',
        check_hash=True)
    detr.load_state_dict(state_dict["model"])
    detr.eval()

    CLASSES = [
        'N/A', 'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus',
        'train', 'truck', 'boat', 'traffic light', 'fire hydrant', 'N/A',
        'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse',
        'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe', 'N/A',
        'backpack', 'umbrella', 'N/A', 'N/A', 'handbag', 'tie', 'suitcase',
        'frisbee', 'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat',