Example #1
def config_initialization():
    # image shape and feature layers shape inference
    image_shape = (FLAGS.train_image_height, FLAGS.train_image_width)
    
    if not FLAGS.dataset_dir:
        raise ValueError('You must supply the dataset directory with --dataset_dir')
    tf.logging.set_verbosity(tf.logging.DEBUG)
    util.init_logger(log_file='log_train_seglink_%d_%d.log' % image_shape,
                     log_path=FLAGS.train_dir, stdout=False, mode='a')
    
    
    config.init_config(image_shape, 
                       batch_size = FLAGS.batch_size, 
                       weight_decay = FLAGS.weight_decay, 
                       num_gpus = FLAGS.num_gpus, 
                       train_with_ignored = FLAGS.train_with_ignored,
                       seg_loc_loss_weight = FLAGS.seg_loc_loss_weight, 
                       link_cls_loss_weight = FLAGS.link_cls_loss_weight, 
                       )

    batch_size = config.batch_size
    batch_size_per_gpu = config.batch_size_per_gpu
        
    tf.summary.scalar('batch_size', batch_size)
    tf.summary.scalar('batch_size_per_gpu', batch_size_per_gpu)

    util.proc.set_proc_name(FLAGS.model_name + '_' + FLAGS.dataset_name)
    
    dataset = dataset_factory.get_dataset(FLAGS.dataset_name, FLAGS.dataset_split_name, FLAGS.dataset_dir)
    config.print_config(FLAGS, dataset)
    return dataset
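
Note: the batch_size_per_gpu summarized above is presumably computed inside config.init_config from batch_size and num_gpus. A minimal sketch of that split, assuming an even division across GPUs (the real SegLink config module may do this differently):

def split_batch_size(batch_size, num_gpus):
    # Hypothetical helper illustrating the per-GPU split implied by the two summaries above.
    if batch_size % num_gpus != 0:
        raise ValueError('batch_size (%d) must be divisible by num_gpus (%d)'
                         % (batch_size, num_gpus))
    return batch_size // num_gpus

# e.g. split_batch_size(32, 4) -> 8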
Example #2
def config_initialization():
    # image shape and feature layers shape inference
    image_shape = (FLAGS.train_image_height, FLAGS.train_image_width)

    if not FLAGS.dataset_dir:
        raise ValueError('You must supply the dataset directory with --dataset_dir')

    tf.logging.set_verbosity(tf.logging.DEBUG)
    util.init_logger(
        log_file='log_train_pixel_link_%d_%d.log' % image_shape,
        log_path=FLAGS.train_dir, stdout=False, mode='a')

    batch_size = FLAGS.batch_size
    # batch_size_per_gpu = config.batch_size_per_gpu

    tf.summary.scalar('batch_size', batch_size)
    # tf.summary.scalar('batch_size_per_gpu', batch_size_per_gpu)

    util.proc.set_proc_name('ld_train_on' + '_' + FLAGS.dataset_name + '_GPU_' + config.TRAIN_GPU_ID)

    from dataset import tfrecords_to_medicalimage
    train_dataset = tfrecords_to_medicalimage.get_split(FLAGS.dataset_split_name, FLAGS.dataset_dir, FLAGS.file_pattern, None, FLAGS.attribute_flag)
    # dataset = dataset_factory.get_dataset(FLAGS.dataset_name, FLAGS.dataset_split_name, FLAGS.dataset_dir)
    config.print_config(FLAGS, train_dataset)
    val_dataset_dir = os.path.join(os.path.dirname(FLAGS.dataset_dir), 'val_tfrecords')
    if not os.path.exists(val_dataset_dir):
        val_dataset_dir = os.path.join(os.path.dirname(FLAGS.dataset_dir), 'val')
    if not os.path.exists(val_dataset_dir):
        print(val_dataset_dir)
        assert False
    val_dataset = tfrecords_to_medicalimage.get_split('val', val_dataset_dir, FLAGS.file_pattern, None,
                                                      FLAGS.attribute_flag)
    return train_dataset, val_dataset
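
Note: Example #2 locates the validation split by probing two sibling directories of --dataset_dir. The same lookup, extracted into a standalone helper (a sketch that assumes only the directory layout used above):

import os

def resolve_val_dir(dataset_dir):
    # Prefer 'val_tfrecords' next to the training directory, then fall back to 'val'.
    parent = os.path.dirname(dataset_dir)
    for name in ('val_tfrecords', 'val'):
        candidate = os.path.join(parent, name)
        if os.path.exists(candidate):
            return candidate
    raise IOError('no validation directory found under %s' % parent)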
Example #3
def config_initialization():
    # image shape and feature layers shape inference
    image_shape = (FLAGS.train_image_height, FLAGS.train_image_width)

    if not FLAGS.dataset_dir:
        raise ValueError(
            'You must supply the dataset directory with --dataset_dir')

    tf.logging.set_verbosity(tf.logging.DEBUG)
    util.init_logger(log_file='log_train_pixel_link_%d_%d.log' % image_shape,
                     log_path=FLAGS.train_dir,
                     stdout=False,
                     mode='a')

    config.load_config(FLAGS.train_dir)

    config.init_config(image_shape,
                       batch_size=FLAGS.batch_size,
                       weight_decay=FLAGS.weight_decay,
                       num_gpus=FLAGS.num_gpus)

    batch_size = config.batch_size
    batch_size_per_gpu = config.batch_size_per_gpu

    tf.summary.scalar('batch_size', batch_size)
    tf.summary.scalar('batch_size_per_gpu', batch_size_per_gpu)

    util.proc.set_proc_name('train_pixel_link_on' + '_' + FLAGS.dataset_name)

    dataset = dataset_factory.get_dataset(FLAGS.dataset_name,
                                          FLAGS.dataset_split_name,
                                          FLAGS.dataset_dir)
    config.print_config(FLAGS, dataset)
    return dataset
Example #4
def assess():
    config['user_action'] = "assess"
    print_config("start assess stage")
    form = AssessmentForm(assessname=config['assess_name'],
                          filename=config['input_csv_file'],
                          outputFolder=config['output_folder'],
                          reportName=config['output_report_name'],
                          reportFormat=config['output_report_format'],
                          assessType=config['assess_type'],
                          target=config['target_platform'])
    if form.validate_on_submit():
        flash(f'Start SQL Server Azure migration assessment ...', 'info')
        params = (f"-AssessName {config['assess_name']} -InputFile {config['input_csv_file']} "
                  f"-OutputFolder {config['output_folder']} -ReportName {config['output_report_name']} "
                  f"-AssessType {config['assess_type']} -Target {config['target_platform']}")

        result = str(execute_script("catas.ps1", params))
        # result = 'Success'  # debug stub; if left active it makes the error check below unreachable
        if result[0:5] == "Error":
            errorMessage = result if len(result) <= 200 else result[0:200]
            flash(errorMessage, 'danger')
            #raise AssessError('SQL server host connection Assess failed, please check host name and credentials.')
        else:
            #flash(f"PowerShell script good {result}", 'success')
            flash(f'Assessment successful!', 'success')
        form = AssessmentForm(assessname=config['assess_name'],
                              filename=config['input_csv_file'],
                              outputFolder=config['output_folder'],
                              reportName=config['output_report_name'],
                              reportFormat=config['output_report_format'],
                              assessType=config['assess_type'],
                              target=config['target_platform'])

    return render_template('assess.html', title='Assess', form=form)
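
Note: execute_script is used in Examples #4 and #19 but is not shown. A purely illustrative subprocess-based stand-in, assuming the helper runs a PowerShell script and returns either its output or a string starting with "Error" (the project's real helper may behave differently):

import subprocess

def execute_script(script_name, params):
    # Hypothetical reconstruction: run the PowerShell script and return its output,
    # or an "Error: ..." string when the script exits with a non-zero status.
    completed = subprocess.run(
        ['powershell', '-ExecutionPolicy', 'Bypass', '-File', script_name] + params.split(),
        capture_output=True, text=True)
    if completed.returncode != 0:
        return 'Error: ' + completed.stderr.strip()
    return completed.stdout.strip()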
Example #5
def config_initialization():
    # image shape and feature layers shape inference
    image_shape = (FLAGS.train_image_height, FLAGS.train_image_width)
    
    if not FLAGS.dataset_dir:
        raise ValueError('You must supply the dataset directory with --dataset_dir')
    tf.logging.set_verbosity(tf.logging.DEBUG)
    util.init_logger(log_file='log_train_seglink_%d_%d.log' % image_shape,
                     log_path=FLAGS.train_dir, stdout=False, mode='a')
    
    # Initialize some config values; no need to pay much attention here
    config.init_config(image_shape, 
                       batch_size = FLAGS.batch_size, 
                       weight_decay = FLAGS.weight_decay, 
                       num_gpus = FLAGS.num_gpus, 
                       train_with_ignored = FLAGS.train_with_ignored,
                       seg_loc_loss_weight = FLAGS.seg_loc_loss_weight, 
                       link_cls_loss_weight = FLAGS.link_cls_loss_weight, 
                       )

    batch_size = config.batch_size
    batch_size_per_gpu = config.batch_size_per_gpu
        
    tf.summary.scalar('batch_size', batch_size)
    tf.summary.scalar('batch_size_per_gpu', batch_size_per_gpu)

    #util.proc.set_proc_name(FLAGS.model_name + '_' + FLAGS.dataset_name)
    # Step 1: create the dataset (by xiaodong)
    dataset = dataset_factory.get_dataset(FLAGS.dataset_name, FLAGS.dataset_split_name, FLAGS.dataset_dir)
    config.print_config(FLAGS, dataset)
    return dataset
Example #6
def test_read():
    os.environ['CUDA_VISIBLE_DEVICES'] = config.gpu_config['ids']
    dataset_dir = '/media/dl-box/HDD3/ld/Documents/datasets/CITYSCAPES/train_tfrecords'
    dataset = read_tfrecords_dataset(dataset_dir, 'cityscapes', 19)
    from config import print_config
    print_config(dataset)
    batch_queue = create_dataset_batch_queue(dataset)
    image_tensor, mask_tensor, image_name_tensor, mask_name_tensor = batch_queue.dequeue()
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        sess.run(tf.local_variables_initializer())

        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(coord=coord)

        print("image tensor is ", image_tensor)
        for i in range(10):
            image_value, mask_value, image_name_value, mask_name_value = sess.run(
                [image_tensor, mask_tensor, image_name_tensor, mask_name_tensor])
            print(np.shape(image_value), np.shape(mask_value))
            cv2.imwrite('./%d_%s' % (i, image_name_value[0].decode('utf-8')), image_value[0])
            cv2.imwrite('./%d_%s' % (i, mask_name_value[0].decode('utf-8')), mask_value[0])

        coord.request_stop()
        coord.join(threads)
Example #7
def config_initialization():
    # image shape and feature layers shape inference
    image_shape = (FLAGS.eval_image_height, FLAGS.eval_image_width)

    if not FLAGS.dataset_dir:
        raise ValueError(
            'You must supply the dataset directory with --dataset_dir')
    tf.logging.set_verbosity(tf.logging.DEBUG)

    config.init_config(
        image_shape,
        batch_size=1,
        seg_conf_threshold=FLAGS.seg_conf_threshold,
        link_conf_threshold=FLAGS.link_conf_threshold,
        train_with_ignored=FLAGS.train_with_ignored,
        seg_loc_loss_weight=FLAGS.seg_loc_loss_weight,
        link_cls_loss_weight=FLAGS.link_cls_loss_weight,
    )

    util.proc.set_proc_name('eval_' + FLAGS.model_name + '_' +
                            FLAGS.dataset_name)
    dataset = dataset_factory.get_dataset(FLAGS.dataset_name,
                                          FLAGS.dataset_split_name,
                                          FLAGS.dataset_dir)
    config.print_config(FLAGS, dataset, print_to_file=False)

    return dataset
Example #8
File: eval.py Project: yli96/DSEBM
def main(argv=None):
    config.print_config()

    if os.path.exists(FLAGS.dir_eval):
        shutil.rmtree(FLAGS.dir_eval)
    os.makedirs(FLAGS.dir_eval)

    eval()
Example #9
def main():
    print_config()

    dataset = load_dataset(CONFIG['dataset'], CONFIG['dataset_path'])

    env = TextLocEnv(dataset.image_paths, dataset.bounding_boxes)
    m = CustomModel(10)
    vs = [m(env.reset())]
    g = c.build_computational_graph(vs)
    with open('graph.dot', 'w') as o:
        o.write(g.dump())
Example #10
File: train.py Project: yli96/DSEBM
def main(argv=None):
    config.print_config()

    if os.path.exists(FLAGS.dir_train):
        shutil.rmtree(FLAGS.dir_train)
    os.makedirs(FLAGS.dir_train)

    if os.path.exists(FLAGS.dir_parameter):
        shutil.rmtree(FLAGS.dir_parameter)
    os.makedirs(FLAGS.dir_parameter)

    train()
Example #11
def main():
    print_config()

    relative_paths = np.loadtxt(CONFIG['imagefile_path'], dtype=str)
    images_base_path = os.path.dirname(CONFIG['imagefile_path'])
    absolute_paths = [images_base_path + i.strip('.') for i in relative_paths]
    bboxes = np.load(CONFIG['boxfile_path'], allow_pickle=True)

    env = TextLocEnv(absolute_paths, bboxes, -1)
    m = CustomModel(10)
    vs = [m(env.reset())]
    g = c.build_computational_graph(vs)
    with open('graph.dot', 'w') as o:
        o.write(g.dump())
Example #12
def main():
    print_config()

    relative_paths = np.loadtxt(CONFIG['imagefile_path'], dtype=str)
    images_base_path = os.path.dirname(CONFIG['imagefile_path'])
    absolute_paths = [images_base_path + i.strip('.') for i in relative_paths]
    bboxes = np.load(CONFIG['boxfile_path'], allow_pickle=True)

    env = TextLocEnv(absolute_paths, bboxes, -1)
    q_func = chainerrl.q_functions.SingleModelStateQFunctionWithDiscreteAction(
        CustomModel(9))
    optimizer = chainer.optimizers.Adam(eps=1e-2)
    optimizer.setup(q_func)
    replay_buffer = chainerrl.replay_buffer.ReplayBuffer(
        capacity=CONFIG['replay_buffer_capacity'])

    explorer = chainerrl.explorers.ConstantEpsilonGreedy(
        epsilon=0, random_action_func=env.action_space.sample)

    agent = chainerrl.agents.DQN(
        q_func,
        optimizer,
        replay_buffer,
        CONFIG['gamma'],
        explorer,
        gpu=CONFIG['gpu_id'],
        replay_start_size=CONFIG['replay_start_size'],
        update_interval=CONFIG['update_interval'],
        target_update_interval=CONFIG['target_update_interval'])

    agent.load(CONFIG['resultdir_path'] + '/best')
    actions = defaultdict(int)
    with open('iou.txt', 'w') as f:
        i = 0
        for j in range(100):
            obs = env.reset()
            done = False
            while (not done) and i < 100:
                #print(i,j)
                action = agent.act(obs)
                actions[ACTION_MEANINGS[action]] += 1
                obs, reward, done, info = env.step(action)

                print(ACTION_MEANINGS[action], reward, done, info)
                if done:
                    f.write(f'{env.iou}\n')
                #input()
                i += 1
Example #13
def main():
    print_config()
    dataset = DataSet(config)

    data = dataset.data(train=True)

    dataset.saveDataSet(data[0], data[1], 'Train_{}'.format(config.nTrain))

    data = dataset.data(train=False)

    dataset.saveDataSet(data[0], data[1], 'Test_{}'.format(config.nTest))
    print('Datasets were saved to disk!')

    train_matrices, train_labels = dataset.loadDataSet('Train_{}'.format(
        config.nTrain))
    test_matrices, test_labels = dataset.loadDataSet('Test_{}'.format(
        config.nTest))
    print('Datasets were loaded from disk!')
    models = []
    addDenseLayerClassification(models,
                                inputDim=(config.nCells, config.nMuts),
                                nameSuffix="sg",
                                hiddenSize=100,
                                nLayers=2,
                                dropOutRate=0.2,
                                dropOutFirst=False,
                                dropOutAfterFirst=True,
                                activation="sigmoid",
                                useSoftmax=False)

    for name, model in models:
        history = LossAccHistory(test_matrices, test_labels)
        model.fit(train_matrices,
                  train_labels,
                  epochs=config.nb_epoch,
                  verbose=1,
                  callbacks=[history])

        df = pd.DataFrame(index = ['epoch_{}'.format(e) for e in range(config.nb_epoch)], \
            columns = ['train_acc', 'test_acc', 'train_loss', 'test_loss'])

        df['train_acc'] = history.train_acc
        df['test_acc'] = history.test_acc
        df['train_loss'] = history.train_loss
        df['test_loss'] = history.test_loss
        df.to_csv(config.output_dir + '/{name}_{nCells}x{nMuts}.csv'.format(
            name=name, nCells=config.nCells, nMuts=config.nMuts),
                  sep=',')
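
Note: LossAccHistory is constructed in Example #13 but not defined. A minimal Keras callback sketch consistent with how it is built and read above, assuming the model is compiled with metrics=['accuracy'] (the original implementation may differ):

from tensorflow import keras

class LossAccHistory(keras.callbacks.Callback):
    # Hypothetical reconstruction: record train metrics from the epoch logs and
    # evaluate on the held-out matrices after every epoch.
    def __init__(self, test_matrices, test_labels):
        super(LossAccHistory, self).__init__()
        self.test_matrices = test_matrices
        self.test_labels = test_labels
        self.train_acc, self.train_loss = [], []
        self.test_acc, self.test_loss = [], []

    def on_epoch_end(self, epoch, logs=None):
        logs = logs or {}
        self.train_acc.append(logs.get('accuracy', logs.get('acc')))
        self.train_loss.append(logs.get('loss'))
        loss, acc = self.model.evaluate(self.test_matrices, self.test_labels, verbose=0)
        self.test_loss.append(loss)
        self.test_acc.append(acc)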
Example #14
class AssessmentForm(FlaskForm):
    assessname = StringField('Assessment name',
                           validators=[DataRequired(), Length(min=3, max=50)], default=config['assess_name'])
    filename = StringField('Put your input server CSV file name including absolute file path', 
                           validators=[DataRequired(), Length(min=5, max=200)], default=config['input_csv_file'])
    #filename = FileField('Select Your File', validators=[FileRequired(), FileAllowed(['csv'], 'CSV file only!')])
    outputFolder = StringField('Put your output destination path here (optional)', 
                           validators=[DataRequired(), Length(min=5, max=200)], default=config['output_folder'])
    reportName = StringField('Put your output report name here (optional)', 
                           validators=[DataRequired(), Length(min=3, max=50)], default=config['output_report_name'])
    reportFormat = SelectField('Output report format', [DataRequired()],
                        choices=[('dma', 'DMA'),
                                 ('json', 'JSON'),
                                 ('csv', 'CSV'),
                                 ('all', 'DMA and JSON')], default=config['output_report_format'])
    assessType = SelectField('Assessment type', [DataRequired()],
                        choices=[('Both', 'SQL Feature Parity and Compatibility Level'),
                                 ('Feature', 'SQL Feature Parity'),                  
                                 ('Compat', 'SQL Compatibility Level'),
                                 ('Evaluate', 'SQL Target Evaluation'),
                                 ('SSIS', 'SQL Server Integration Services')], default=config['assess_type'])
    target = SelectField('Target platform', [DataRequired()],
                        choices=[('SQLdb', 'Azure SQL Database'),
                                 ('SQLmi', 'Azure SQL Managed Instance'),
                                 ('SqlServer2012', 'SQL Server 2012'),
                                 ('SqlServer2014', 'SQL Server 2014'),
                                 ('SqlServer2016', 'SQL Server 2016'),
                                 ('SqlServerLinux2017', 'SQL Server 2017 Linux'),
                                 ('SqlServerWindows2017', 'SQL Server 2017 Windows'),
                                 ('SqlServerLinux2019', 'SQL Server 2019 Linux'),
                                 ('SqlServerWindows2019', 'SQL Server 2019 Windows')], default=config['target_platform'])
    print_config("Assessment form")
    submit = SubmitField('Assess')

    def validate_assessname(self, assessname):
        config['assess_name'] = assessname.data

    def validate_filename(self, filename):
        file = check_csv_file(filename.data)    # If no issue, return an empty string
        if file:
            raise ValidationError(f"{file} Please choose a different one.")
        config['input_csv_file'] = filename.data

    def validate_outputFolder(self, outputFolder):
        config['output_folder'] = outputFolder.data

    def validate_reportName(self, reportName):
        config['output_report_name'] = reportName.data

    def validate_reportFormat(self, reportFormat):
        config['output_report_format'] = reportFormat.data

    def validate_assessType(self, assessType):
        config['assess_type'] = assessType.data
        if self.assessType.data == 'SSIS':
            config['output_report_format'] = 'json'
            self.reportFormat.data = 'json'

    def validate_target(self, target):
        config['target_platform'] = target.data
Example #15
def main(config):
    # Body omitted in the original example; a placeholder keeps it syntactically valid.
    pass

if __name__ == "__main__":

    # ----------------------------------------
    # Parse configuration
    config, unparsed = get_config()
    # If we have unparsed arguments, print usage and exit
    if len(unparsed) > 0:
        print_usage()
        exit(1)
    print_config(config)
    main(config)
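
Note: get_config, print_usage and print_config are not shown in Example #15. A minimal argparse-based sketch that matches the call sites above (the flag names are placeholders, not the project's real options):

import argparse

def get_config():
    # Returns (config, unparsed) exactly as the __main__ block above expects.
    parser = argparse.ArgumentParser()
    parser.add_argument('--learning_rate', type=float, default=1e-3)
    parser.add_argument('--batch_size', type=int, default=32)
    return parser.parse_known_args()

def print_usage():
    print('Usage: script.py [--learning_rate LR] [--batch_size N]')

def print_config(config):
    for key, value in sorted(vars(config).items()):
        print('%s: %s' % (key, value))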
Example #16
    def test_ctmrg_Ladders_VBS1x2(self):
        cfg.configure(args)
        cfg.print_config()
        torch.set_num_threads(args.omp_cores)

        model = coupledLadders.COUPLEDLADDERS_D2_BIPARTITE(alpha=args.alpha)

        state = read_ipeps(args.instate)

        def ctmrg_conv_energy(state, env, history, ctm_args=cfg.ctm_args):
            with torch.no_grad():
                if not history:
                    history = []
                e_curr = model.energy_2x1_1x2(state, env)
                history.append([e_curr.item()])

                if len(history) > 1 and abs(history[-1][0] - history[-2][0]
                                            ) < ctm_args.ctm_conv_tol:
                    return True, history
            return False, history

        ctm_env_init = ENV(args.chi, state)
        init_env(state, ctm_env_init)

        ctm_env_init, *ctm_log = ctmrg.run(state,
                                           ctm_env_init,
                                           conv_check=ctmrg_conv_energy)

        e_curr0 = model.energy_2x1_1x2(state, ctm_env_init)
        obs_values0, obs_labels = model.eval_obs(state, ctm_env_init)
        obs_dict = dict(zip(obs_labels, obs_values0))

        eps = 1.0e-12
        self.assertTrue(abs(e_curr0 - (-0.375)) < eps)
        for coord, site in state.sites.items():
            self.assertTrue(obs_dict[f"m{coord}"] < eps, msg=f"m{coord}")
            self.assertTrue(obs_dict[f"SS2x1{coord}"] < eps,
                            msg=f"SS2x1{coord}")
            for l in ["sz", "sp", "sm"]:
                self.assertTrue(abs(obs_dict[f"{l}{coord}"]) < eps,
                                msg=f"{l}{coord}")
        for coord in [(0, 0)]:
            self.assertTrue(abs(obs_dict[f"SS1x2{coord}"] - (-0.75)) < eps,
                            msg=f"SS1x2{coord}")
Example #17
def do_config(args):
    func = getattr(args, 'subparser', None)
    if func is not None:
        if func == 'basex':
            do_basexconf(args)
        elif func == 'oracle':
            do_oracleconf(args)
        elif func == 'postgresql':
            do_postgreconf(args)
        elif func == 'sqlite':
            do_sqliteconf(args)
        elif func == 'sqlserver':
            do_sqlserverconf(args)
        elif func == 'mysql':
            do_mysqlconf(args)
        elif func == 'rel':
            do_relconf(args)
    else:
        if args.save:
            config.update_config()
        else:
            config.print_config()
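
Note: the if/elif chain in Example #17 only maps subparser names to handler functions. An equivalent table-driven version (a sketch that assumes the do_*conf handlers and the config module from the example):

_HANDLERS = {
    'basex': do_basexconf,
    'oracle': do_oracleconf,
    'postgresql': do_postgreconf,
    'sqlite': do_sqliteconf,
    'sqlserver': do_sqlserverconf,
    'mysql': do_mysqlconf,
    'rel': do_relconf,
}

def do_config(args):
    handler = _HANDLERS.get(getattr(args, 'subparser', None))
    if handler is not None:
        handler(args)
    elif args.save:
        config.update_config()
    else:
        config.print_config()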
Example #18
def config_initialization():
    # image shape and feature layers shape inference
    image_shape = (FLAGS.eval_image_height, FLAGS.eval_image_width)
    
    if not FLAGS.dataset_dir:
        raise ValueError('You must supply the dataset directory with --dataset_dir')
    tf.logging.set_verbosity(tf.logging.DEBUG)
    
    config.init_config(image_shape, 
                       batch_size = 1, 
                       seg_conf_threshold = FLAGS.seg_conf_threshold,
                       link_conf_threshold = FLAGS.link_conf_threshold, 
                       train_with_ignored = FLAGS.train_with_ignored,
                       seg_loc_loss_weight = FLAGS.seg_loc_loss_weight, 
                       link_cls_loss_weight = FLAGS.link_cls_loss_weight, 
                       )
        
    
    util.proc.set_proc_name('eval_' + FLAGS.model_name + '_' + FLAGS.dataset_name )
    dataset = dataset_factory.get_dataset(FLAGS.dataset_name, FLAGS.dataset_split_name, FLAGS.dataset_dir)
    config.print_config(FLAGS, dataset, print_to_file = False)
    
    return dataset
Example #19
def validate():
    config['user_action'] = "validate"
    print_config('start validation')
    form = ValidationForm(assessname=config['assess_name'],
                          filename=config['input_csv_file'],
                          validationType=config['validation_type'])
    if request.method == 'POST':
        print("This is a POST.")
        if form.validate_on_submit():
            assessName = form.assessname.data
            filename = form.filename.data
            #filename = secure_filename(form.filename.data.filename)
            print(f"csv file name: {filename}")
            validationType = form.validationType.data
            print(f"you chose: {validationType}")
            result = str(
                execute_script(
                    "catas.ps1",
                    f"-AssessName {assessName} -InputFile {filename} -{validationType}"
                ))
            if result[0:5] == "Error":
                errorMessage = result if len(result) <= 200 else result[0:200]
                flash(errorMessage, 'danger')
                #raise ValidationError('SQL server host connection validation failed, please check host name and credentials.')
            else:
                #flash(f"PowerShell script good {result}", 'success')
                flash(f'Validation successful for {filename}!', 'success')
            form = ValidationForm(assessname=assessName,
                                  filename=filename,
                                  validationType=validationType)

            #return redirect(url_for('validate'))
            #return render_template('validate.html', title='Validate', form=form)
    else:
        print("This is not a POST.")
    #print(form.filename.data)
    return render_template('validate.html', title='Validate', form=form)
Example #20
class ValidationForm(FlaskForm):
    assessname = StringField('Assessment name',
                           validators=[DataRequired(), Length(min=3, max=50)])
    filename = StringField('Put your input server CSV file name including absolute file path', 
                           validators=[DataRequired(), Length(min=5, max=200)])
    #filename = FileField('Select Your File', validators=[FileRequired(), FileAllowed(['csv'], 'CSV file only!')])
    validationType = SelectField('Validation type', [DataRequired()],
                        choices=[('ValidateHost', 'SQL Server Host Connection'),
                                 ('ValidateSql', 'SQL Server Instance Connection'),
                                 ('ValidateBoth', 'SQL Server Host and Instance Connection')])
    print_config("validation form")
    submit = SubmitField('Validate')

    def validate_assessname(self, assessname):
        config['assess_name'] = assessname.data

    def validate_filename(self, filename):
        file = check_csv_file(filename.data)    # If no issue, return an empty string
        if file:
            raise ValidationError(f"{file} Please choose a different one.")
        config['input_csv_file'] = filename.data

    def validate_validationType(self, validationType):
        config['validation_type'] = validationType.data
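
Note: check_csv_file is referenced by both forms (Examples #14 and #20) but not shown; per the inline comment it returns an empty string when the file is acceptable. A minimal sketch of such a validator (the real one may also check headers or contents):

import os

def check_csv_file(path):
    # Hypothetical reconstruction: return '' when the file looks usable,
    # otherwise a short message describing the problem.
    if not path.lower().endswith('.csv'):
        return 'File "%s" is not a .csv file.' % path
    if not os.path.isfile(path):
        return 'File "%s" was not found.' % path
    return ''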
Example #21
def main():
    cfg.configure(args)
    cfg.print_config()
    torch.set_num_threads(args.omp_cores)
    torch.manual_seed(args.seed)

    model= j1j2.J1J2_C4V_BIPARTITE(j1=args.j1, j2=args.j2, hz_stag=args.hz_stag, \
        delta_zz=args.delta_zz)
    energy_f = model.energy_1x1_lowmem

    # initialize the ipeps
    if args.instate != None:
        state = read_ipeps_c4v(args.instate)
        if args.bond_dim > max(state.get_aux_bond_dims()):
            # extend the auxiliary dimensions
            state = extend_bond_dim(state, args.bond_dim)
        state.add_noise(args.instate_noise)
        # state.sites[(0,0)]= state.sites[(0,0)]/torch.max(torch.abs(state.sites[(0,0)]))
        state.sites[(0, 0)] = state.site() / state.site().norm()
    elif args.opt_resume is not None:
        state = IPEPS_C4V(torch.tensor(0.))
        state.load_checkpoint(args.opt_resume)
    elif args.ipeps_init_type == 'RANDOM':
        bond_dim = args.bond_dim
        A= torch.rand((model.phys_dim, bond_dim, bond_dim, bond_dim, bond_dim),\
            dtype=cfg.global_args.dtype, device=cfg.global_args.device)
        # A= make_c4v_symm(A)
        # A= A/torch.max(torch.abs(A))
        A = A / A.norm()
        state = IPEPS_C4V(A)
    else:
        raise ValueError("Missing trial state: -instate=None and -ipeps_init_type= "\
            +str(args.ipeps_init_type)+" is not supported")

    print(state)

    @torch.no_grad()
    def ctmrg_conv_f(state, env, history, ctm_args=cfg.ctm_args):
        if not history:
            history = dict({"log": []})
        rdm2x1 = rdm2x1_sl(state, env, force_cpu=ctm_args.conv_check_cpu)
        dist = float('inf')
        if len(history["log"]) > 0:
            dist = torch.dist(rdm2x1, history["rdm"], p=2).item()
        history["rdm"] = rdm2x1
        history["log"].append(dist)
        if dist < ctm_args.ctm_conv_tol or len(
                history["log"]) >= ctm_args.ctm_max_iter:
            log.info({
                "history_length": len(history['log']),
                "history": history['log']
            })
            return True, history
        return False, history

    state_sym = to_ipeps_c4v(state)
    ctm_env = ENV_C4V(args.chi, state_sym)
    init_env(state_sym, ctm_env)

    ctm_env, *ctm_log = ctmrg_c4v.run(state_sym,
                                      ctm_env,
                                      conv_check=ctmrg_conv_f)
    loss = energy_f(state_sym, ctm_env)
    obs_values, obs_labels = model.eval_obs(state_sym, ctm_env)
    print(", ".join(["epoch", "energy"] + obs_labels))
    print(", ".join([f"{-1}", f"{loss}"] + [f"{v}" for v in obs_values]))

    def loss_fn(state, ctm_env_in, opt_context):
        ctm_args = opt_context["ctm_args"]
        opt_args = opt_context["opt_args"]
        # 0) preprocess
        # create a copy of state, symmetrize and normalize making all operations
        # tracked. This does not "overwrite" the parameters tensors, living outside
        # the scope of loss_fn
        state_sym = to_ipeps_c4v(state, normalize=True)

        # possibly re-initialize the environment
        if opt_args.opt_ctm_reinit:
            init_env(state_sym, ctm_env_in)

        # 1) compute environment by CTMRG
        ctm_env_out, *ctm_log = ctmrg_c4v.run(state_sym,
                                              ctm_env_in,
                                              conv_check=ctmrg_conv_f,
                                              ctm_args=ctm_args)

        # 2) evaluate loss with converged environment
        loss = energy_f(state_sym, ctm_env_out, force_cpu=args.force_cpu)

        return (loss, ctm_env_out, *ctm_log)

    def _to_json(l):
        re = [l[i, 0].item() for i in range(l.size()[0])]
        im = [l[i, 1].item() for i in range(l.size()[0])]
        return dict({"re": re, "im": im})

    @torch.no_grad()
    def obs_fn(state, ctm_env, opt_context):
        if ("line_search" in opt_context.keys() and not opt_context["line_search"]) \
            or not "line_search" in opt_context.keys():
            state_sym = to_ipeps_c4v(state, normalize=True)
            epoch = len(opt_context["loss_history"]["loss"])
            loss = opt_context["loss_history"]["loss"][-1]
            obs_values, obs_labels = model.eval_obs(state_sym, ctm_env)
            print(", ".join([f"{epoch}",f"{loss}"]+[f"{v}" for v in obs_values]+\
                [f"{torch.max(torch.abs(state.site((0,0))))}"]))

            if args.top_freq > 0 and epoch % args.top_freq == 0:
                coord_dir_pairs = [((0, 0), (1, 0))]
                for c, d in coord_dir_pairs:
                    # transfer operator spectrum
                    print(f"TOP spectrum(T)[{c},{d}] ", end="")
                    l = transferops_c4v.get_Top_spec_c4v(
                        args.top_n, state_sym, ctm_env)
                    print("TOP " + json.dumps(_to_json(l)))

    def post_proc(state, ctm_env, opt_context):
        symm, max_err = verify_c4v_symm_A1(state.site())
        # print(f"post_proc {symm} {max_err}")
        if not symm:
            # force symmetrization outside of autograd
            with torch.no_grad():
                symm_site = make_c4v_symm(state.site())
                # we **cannot** simply normalize the on-site tensors, as the LBFGS
                # takes into account the scale
                # symm_site= symm_site/torch.max(torch.abs(symm_site))
                state.sites[(0, 0)].copy_(symm_site)

    # optimize
    # optimize_state(state, ctm_env, loss_fn, obs_fn=obs_fn, post_proc=post_proc)
    optimize_state(state, ctm_env, loss_fn, obs_fn=obs_fn)

    # compute final observables for the best variational state
    outputstatefile = args.out_prefix + "_state.json"
    state = read_ipeps_c4v(outputstatefile)
    ctm_env = ENV_C4V(args.chi, state)
    init_env(state, ctm_env)
    ctm_env, *ctm_log = ctmrg_c4v.run(state, ctm_env, conv_check=ctmrg_conv_f)
    opt_energy = energy_f(state, ctm_env)
    obs_values, obs_labels = model.eval_obs(state, ctm_env)
    print(", ".join([f"{args.opt_max_iter}", f"{opt_energy}"] +
                    [f"{v}" for v in obs_values]))
Example #22
def main():
    cfg.configure(args)
    cfg.print_config()
    torch.set_num_threads(args.omp_cores)
    torch.manual_seed(args.seed)

    model = j1j2.J1J2(j1=args.j1, j2=args.j2)

    # initialize an ipeps
    # 1) define lattice-tiling function, that maps arbitrary vertex of square lattice
    # coord into one of coordinates within unit-cell of iPEPS ansatz
    if args.tiling == "BIPARTITE":

        def lattice_to_site(coord):
            vx = (coord[0] + abs(coord[0]) * 2) % 2
            vy = abs(coord[1])
            return ((vx + vy) % 2, 0)
    elif args.tiling == "2SITE":

        def lattice_to_site(coord):
            vx = (coord[0] + abs(coord[0]) * 2) % 2
            vy = (coord[1] + abs(coord[1]) * 1) % 1
            return (vx, vy)
    elif args.tiling == "4SITE":

        def lattice_to_site(coord):
            vx = (coord[0] + abs(coord[0]) * 2) % 2
            vy = (coord[1] + abs(coord[1]) * 2) % 2
            return (vx, vy)
    elif args.tiling == "8SITE":

        def lattice_to_site(coord):
            shift_x = coord[0] + 2 * (coord[1] // 2)
            vx = shift_x % 4
            vy = coord[1] % 2
            return (vx, vy)
    else:
        raise ValueError("Invalid tiling: "+str(args.tiling)+" Supported options: "\
            +"BIPARTITE, 2SITE, 4SITE, 8SITE")

    # initialize an ipeps
    if args.instate != None:
        state = read_ipeps(args.instate, vertexToSite=lattice_to_site)
        if args.bond_dim > max(state.get_aux_bond_dims()):
            # extend the auxiliary dimensions
            state = extend_bond_dim(state, args.bond_dim)
        state.add_noise(args.instate_noise)
    elif args.ipeps_init_type == 'RANDOM':
        bond_dim = args.bond_dim

        A = torch.rand((model.phys_dim, bond_dim, bond_dim, bond_dim, bond_dim),\
            dtype=cfg.global_args.dtype,device=cfg.global_args.device)
        B = torch.rand((model.phys_dim, bond_dim, bond_dim, bond_dim, bond_dim),\
            dtype=cfg.global_args.dtype,device=cfg.global_args.device)

        # normalization of initial random tensors
        A = A / torch.max(torch.abs(A))
        B = B / torch.max(torch.abs(B))

        sites = {(0, 0): A, (1, 0): B}

        if args.tiling == "4SITE":
            C= torch.rand((model.phys_dim, bond_dim, bond_dim, bond_dim, bond_dim),\
                dtype=cfg.global_args.dtype,device=cfg.global_args.device)
            D= torch.rand((model.phys_dim, bond_dim, bond_dim, bond_dim, bond_dim),\
                dtype=cfg.global_args.dtype,device=cfg.global_args.device)
            sites[(0, 1)] = C / torch.max(torch.abs(C))
            sites[(1, 1)] = D / torch.max(torch.abs(D))

        if args.tiling == "8SITE":
            E= torch.rand((model.phys_dim, bond_dim, bond_dim, bond_dim, bond_dim),\
                dtype=cfg.global_args.dtype,device=cfg.global_args.device)
            F= torch.rand((model.phys_dim, bond_dim, bond_dim, bond_dim, bond_dim),\
                dtype=cfg.global_args.dtype,device=cfg.global_args.device)
            G= torch.rand((model.phys_dim, bond_dim, bond_dim, bond_dim, bond_dim),\
                dtype=cfg.global_args.dtype,device=cfg.global_args.device)
            H= torch.rand((model.phys_dim, bond_dim, bond_dim, bond_dim, bond_dim),\
                dtype=cfg.global_args.dtype,device=cfg.global_args.device)
            sites[(2, 0)] = E / torch.max(torch.abs(E))
            sites[(3, 0)] = F / torch.max(torch.abs(F))
            sites[(2, 1)] = G / torch.max(torch.abs(G))
            sites[(3, 1)] = H / torch.max(torch.abs(H))

        state = IPEPS(sites, vertexToSite=lattice_to_site)
    else:
        raise ValueError("Missing trial state: -instate=None and -ipeps_init_type= "\
            +str(args.ipeps_init_type)+" is not supported")

    print(state)

    # 2) select the "energy" function
    if args.tiling == "BIPARTITE" or args.tiling == "2SITE":
        energy_f = model.energy_2x2_2site
    elif args.tiling == "4SITE":
        energy_f = model.energy_2x2_4site
    elif args.tiling == "8SITE":
        energy_f = model.energy_2x2_8site
    else:
        raise ValueError("Invalid tiling: "+str(args.tiling)+" Supported options: "\
            +"BIPARTITE, 2SITE, 4SITE, 8SITE")

    def ctmrg_conv_energy(state, env, history, ctm_args=cfg.ctm_args):
        with torch.no_grad():
            if not history:
                history = []
            e_curr = energy_f(state, env)
            obs_values, obs_labels = model.eval_obs(state, env)
            history.append([e_curr.item()] + obs_values)
            print(", ".join([f"{len(history)}", f"{e_curr}"] +
                            [f"{v}" for v in obs_values]))

            if len(history) > 1 and abs(
                    history[-1][0] - history[-2][0]) < ctm_args.ctm_conv_tol:
                return True, history
        return False, history

    ctm_env_init = ENV(args.chi, state)
    init_env(state, ctm_env_init)
    print(ctm_env_init)

    e_curr0 = energy_f(state, ctm_env_init)
    obs_values0, obs_labels = model.eval_obs(state, ctm_env_init)

    print(", ".join(["epoch", "energy"] + obs_labels))
    print(", ".join([f"{-1}", f"{e_curr0}"] + [f"{v}" for v in obs_values0]))

    ctm_env_init, *ctm_log = ctmrg.run(state,
                                       ctm_env_init,
                                       conv_check=ctmrg_conv_energy)

    # 6) compute final observables
    e_curr0 = energy_f(state, ctm_env_init)
    obs_values0, obs_labels = model.eval_obs(state, ctm_env_init)
    history, t_ctm, t_obs = ctm_log
    print("\n")
    print(", ".join(["epoch", "energy"] + obs_labels))
    print("FINAL " + ", ".join([f"{e_curr0}"] + [f"{v}" for v in obs_values0]))
    print(f"TIMINGS ctm: {t_ctm} conv_check: {t_obs}")

    # 7) ----- additional observables ---------------------------------------------
    corrSS = model.eval_corrf_SS((0, 0), (1, 0), state, ctm_env_init,
                                 args.corrf_r)
    print("\n\nSS[(0,0),(1,0)] r " +
          " ".join([label for label in corrSS.keys()]))
    for i in range(args.corrf_r):
        print(f"{i} " +
              " ".join([f"{corrSS[label][i]}" for label in corrSS.keys()]))

    corrSS = model.eval_corrf_SS((0, 0), (0, 1), state, ctm_env_init,
                                 args.corrf_r)
    print("\n\nSS[(0,0),(0,1)] r " +
          " ".join([label for label in corrSS.keys()]))
    for i in range(args.corrf_r):
        print(f"{i} " +
              " ".join([f"{corrSS[label][i]}" for label in corrSS.keys()]))

    # environment diagnostics
    print("\n")
    for c_loc, c_ten in ctm_env_init.C.items():
        u, s, v = torch.svd(c_ten, compute_uv=False)
        print(f"spectrum C[{c_loc}]")
        for i in range(args.chi):
            print(f"{i} {s[i]}")

    # transfer operator spectrum
    site_dir_list = [((0, 0), (1, 0)), ((0, 0), (0, 1))]
    for sdp in site_dir_list:
        print(f"\n\nspectrum(T)[{sdp[0]},{sdp[1]}]")
        l = transferops.get_Top_spec(args.top_n, *sdp, state, ctm_env_init)
        for i in range(l.size()[0]):
            print(f"{i} {l[i,0]} {l[i,1]}")
Example #23
def main():
    p = OptionParser(usage="usage: %prog [options] [name] [version]",
                     description=__doc__)

    p.add_option("--config",
                 action="store_true",
                 help="display the configuration and exit")

    p.add_option('-f', "--force",
                 action="store_true",
                 help="force install the main package "
                      "(not its dependencies, see --forceall)")

    p.add_option("--forceall",
                 action="store_true",
                 help="force install of all packages "
                      "(i.e. including dependencies)")

    p.add_option('-i', "--info",
                 action="store_true",
                 help="show information about a package")

    p.add_option('-l', "--list",
                 action="store_true",
                 help="list the packages currently installed on the system")

    p.add_option('-n', "--dry-run",
                 action="store_true",
                 help="show what would have been downloaded/removed/installed")

    p.add_option('-N', "--no-deps",
                 action="store_true",
                 help="neither download nor install dependencies")

    p.add_option("--remove",
                 action="store_true",
                 help="remove a package")

    p.add_option('-s', "--search",
                 action="store_true",
                 help="search the index in the repo (chain) of packages "
                      "and display versions available.")

    p.add_option('-v', "--verbose", action="store_true")

    p.add_option('--version', action="store_true")

    p.add_option("--whats-new",
                 action="store_true",
                 help="display to which installed packages updates are "
                      "available")

    opts, args = p.parse_args()

    if len(args) > 0 and opts.config:
        p.error("Option takes no arguments")

    if opts.force and opts.forceall:
        p.error("Options --force and --forceall exclude each other")

    pat = None
    if (opts.list or opts.search) and args:
        pat = re.compile(args[0], re.I)

    if opts.version:                              #  --version
        from enstaller import __version__
        print "IronPkg version:", __version__
        return

    if opts.config:                               #  --config
        config.print_config()
        return

    if config.get_path() is None:
        # create config file if it doesn't exist
        config.write()

    conf = config.read()                          #  conf

    global dry_run, version                       #  set globals
    dry_run = opts.dry_run
    version = opts.version

    if opts.list:                                 #  --list
        print_installed(pat)
        return

    c = Chain(conf['IndexedRepos'], verbose)      #  init chain

    if opts.search:                               #  --search
        search(c, pat)
        return

    if opts.info:                                 #  --info
        if len(args) != 1:
            p.error("Option requires one argument (name of package)")
        info_option(c, canonical(args[0]))
        return

    if opts.whats_new:                            # --whats-new
        if args:
            p.error("Option requires no arguments")
        whats_new(c)
        return

    if len(args) == 0:
        p.error("Requirement (name and optional version) missing")
    if len(args) > 2:
        p.error("A requirement is a name and an optional version")
    req = Req(' '.join(args))

    if opts.remove:                               #  --remove
        remove_req(req)
        return

    dists = get_dists(c, req,                     #  dists
                      recur=not opts.no_deps)

    # Warn the user about packages which depend on what will be updated
    depend_warn([dist_naming.filename_dist(d) for d in dists])

    # Packages which are installed currently
    inst = set(egginst.get_installed())

    # These are the packages which are excluded from being installed
    if opts.forceall:
        exclude = set()
    else:
        exclude = set(inst)
        if opts.force:
            exclude.discard(dist_naming.filename_dist(dists[-1]))

    # Fetch distributions
    if not isdir(conf['local']):
        os.makedirs(conf['local'])
    for dist in iter_dists_excl(dists, exclude):
        c.fetch_dist(dist, conf['local'],
                     check_md5=opts.force or opts.forceall,
                     dry_run=dry_run)

    # Remove packages (in reverse install order)
    for dist in dists[::-1]:
        fn = dist_naming.filename_dist(dist)
        if fn in inst:
            # if the distribution (which needs to be installed) is already
            # installed don't remove it
            continue
        cname = cname_fn(fn)
        for fn_inst in inst:
            if cname == cname_fn(fn_inst):
                egginst_remove(fn_inst)

    # Install packages
    installed_something = False
    for dist in iter_dists_excl(dists, exclude):
        installed_something = True
        egginst_install(conf, dist)

    if not installed_something:
        print "No update necessary, %s is up-to-date." % req
        print_installed_info(req.name)
Example #24
def main():
    cfg.configure(args)
    cfg.print_config()
    torch.set_num_threads(args.omp_cores)
    torch.manual_seed(args.seed)

    model = jq.JQ(j1=args.j1, q=args.q)

    # initialize an ipeps
    # 1) define lattice-tiling function, that maps arbitrary vertex of square lattice
    # coord into one of coordinates within unit-cell of iPEPS ansatz

    if args.instate != None:
        state = read_ipeps(args.instate)
        if args.bond_dim > max(state.get_aux_bond_dims()):
            # extend the auxiliary dimensions
            state = extend_bond_dim(state, args.bond_dim)
        state.add_noise(args.instate_noise)
    elif args.ipeps_init_type == 'RANDOM':
        bond_dim = args.bond_dim

        A = torch.rand((model.phys_dim, bond_dim, bond_dim, bond_dim, bond_dim),\
            dtype=cfg.global_args.dtype,device=cfg.global_args.device)
        B = torch.rand((model.phys_dim, bond_dim, bond_dim, bond_dim, bond_dim),\
            dtype=cfg.global_args.dtype,device=cfg.global_args.device)
        C = torch.rand((model.phys_dim, bond_dim, bond_dim, bond_dim, bond_dim),\
            dtype=cfg.global_args.dtype,device=cfg.global_args.device)
        D = torch.rand((model.phys_dim, bond_dim, bond_dim, bond_dim, bond_dim),\
            dtype=cfg.global_args.dtype,device=cfg.global_args.device)

        sites = {(0, 0): A, (1, 0): B, (0, 1): C, (1, 1): D}

        for k in sites.keys():
            sites[k] = sites[k] / torch.max(torch.abs(sites[k]))
        state = IPEPS(sites, lX=2, lY=2)
    else:
        raise ValueError("Missing trial state: -instate=None and -ipeps_init_type= "\
            +str(args.ipeps_init_type)+" is not supported")

    print(state)

    def ctmrg_conv_energy(state, env, history, ctm_args=cfg.ctm_args):
        with torch.no_grad():
            if not history:
                history = []
            e_curr = model.energy_2x2_4site(state, env)
            obs_values, obs_labels = model.eval_obs(state, env)
            history.append([e_curr.item()] + obs_values)
            print(", ".join([f"{len(history)}", f"{e_curr}"] +
                            [f"{v}" for v in obs_values]))

            if len(history) > 1 and abs(
                    history[-1][0] - history[-2][0]) < ctm_args.ctm_conv_tol:
                return True, history
        return False, history

    ctm_env_init = ENV(args.chi, state)
    init_env(state, ctm_env_init)
    print(ctm_env_init)

    e_curr0 = model.energy_2x2_4site(state, ctm_env_init)
    obs_values0, obs_labels = model.eval_obs(state, ctm_env_init)

    print(", ".join(["epoch", "energy"] + obs_labels))
    print(", ".join([f"{-1}", f"{e_curr0}"] + [f"{v}" for v in obs_values0]))

    ctm_env_init, *ctm_log = ctmrg.run(state,
                                       ctm_env_init,
                                       conv_check=ctmrg_conv_energy)

    # ----- S(0).S(r) -----
    site_dir_list = [((0, 0), (1, 0)), ((0, 0), (0, 1)), ((1, 1), (1, 0)),
                     ((1, 1), (0, 1))]
    for sdp in site_dir_list:
        corrSS = model.eval_corrf_SS(*sdp, state, ctm_env_init, args.corrf_r)
        print(f"\n\nSS[{sdp[0]},{sdp[1]}] r " +
              " ".join([label for label in corrSS.keys()]))
        for i in range(args.corrf_r):
            print(f"{i} " +
                  " ".join([f"{corrSS[label][i]}" for label in corrSS.keys()]))

    # ----- (S(0).S(x))(S(rx).S(rx+x)) -----
    for sdp in site_dir_list:
        corrDD = model.eval_corrf_DD_H(*sdp, state, ctm_env_init, args.corrf_r)
        print(f"\n\nDD[{sdp[0]},{sdp[1]}] r " +
              " ".join([label for label in corrDD.keys()]))
        for i in range(args.corrf_r):
            print(f"{i} " +
                  " ".join([f"{corrDD[label][i]}" for label in corrDD.keys()]))

    # ----- (S(0).S(y))(S(rx).S(rx+y)) -----
    for sdp in site_dir_list:
        corrDD_V = model.eval_corrf_DD_V(*sdp, state, ctm_env_init,
                                         args.corrf_r)
        print(f"\n\nDD_V[{sdp[0]},{sdp[1]}] r " +
              " ".join([label for label in corrDD_V.keys()]))
        for i in range(args.corrf_r):
            print(f"{i} " + " ".join(
                [f"{corrDD_V[label][i]}" for label in corrDD_V.keys()]))

    # environment diagnostics
    for c_loc, c_ten in ctm_env_init.C.items():
        u, s, v = torch.svd(c_ten, compute_uv=False)
        print(f"\n\nspectrum C[{c_loc}]")
        for i in range(args.chi):
            print(f"{i} {s[i]}")

    # transfer operator spectrum
    for sdp in site_dir_list:
        print(f"\n\nspectrum(T)[{sdp[0]},{sdp[1]}]")
        l = transferops.get_Top_spec(args.top_n, *sdp, state, ctm_env_init)
        for i in range(l.size()[0]):
            print(f"{i} {l[i,0]} {l[i,1]}")
Example #25
def main():
    try:
        user_base = site.USER_BASE
    except AttributeError:
        user_base = abs_expanduser('~/.local')
    setup_handlers()

    p = ArgumentParser(description=__doc__)
    p.add_argument('cnames', metavar='NAME', nargs='*',
                   help='package(s) to work on')
    p.add_argument("--add-url", metavar='URL',
                   help="add a repository URL to the configuration file")
    p.add_argument("--config", action="store_true",
                   help="display the configuration and exit")
    p.add_argument('-f', "--force", action="store_true",
                   help="force install the main package "
                        "(not its dependencies, see --forceall)")
    p.add_argument("--forceall", action="store_true",
                   help="force install of all packages "
                        "(i.e. including dependencies)")
    p.add_argument("--hook", action="store_true",
                   help="don't install into site-packages (experimental)")
    p.add_argument("--imports", action="store_true",
                   help="show which packages can be imported")
    p.add_argument('-i', "--info", action="store_true",
                   help="show information about a package")
    p.add_argument("--log", action="store_true", help="print revision log")
    p.add_argument('-l', "--list", action="store_true",
                   help="list the packages currently installed on the system")
    p.add_argument('-n', "--dry-run", action="store_true",
               help="show what would have been downloaded/removed/installed")
    p.add_argument('-N', "--no-deps", action="store_true",
                   help="neither download nor install dependencies")
    p.add_argument("--env", action="store_true",
                   help="based on the configuration, display how to set "
                        "some environment variables")
    p.add_argument("--prefix", metavar='PATH',
                   help="install prefix (disregarding any settings in "
                        "the config file)")
    p.add_argument("--proxy", metavar='URL', help="use a proxy for downloads")
    p.add_argument("--remove", action="store_true", help="remove a package")
    p.add_argument("--revert", metavar="REV",
                   help="revert to a previous set of packages")
    p.add_argument('-s', "--search", action="store_true",
                   help="search the index in the repo (chain) of packages "
                        "and display versions available.")
    p.add_argument("--sys-config", action="store_true",
                   help="use <sys.prefix>/.enstaller4rc (even when "
                        "~/.enstaller4rc exists)")
    p.add_argument("--sys-prefix", action="store_true",
                   help="use sys.prefix as the install prefix")
    p.add_argument("--user", action="store_true",
               help="install into user prefix, i.e. --prefix=%r" % user_base)
    p.add_argument("--userpass", action="store_true",
                   help="change EPD authentication in configuration file")
    p.add_argument('-v', "--verbose", action="store_true")
    p.add_argument('--version', action="version",
                   version='enstaller version: ' + __version__)
    p.add_argument("--whats-new", action="store_true",
                   help="display to which installed packages updates are "
                        "available")
    args = p.parse_args()

    if len(args.cnames) > 0 and (args.config or args.env or args.userpass or
                                 args.revert or args.log or args.whats_new):
        p.error("Option takes no arguments")

    if args.user:
        args.prefix = user_base

    if args.prefix and args.sys_prefix:
        p.error("Options --prefix and --sys-prefix exclude each other")

    if args.force and args.forceall:
        p.error("Options --force and --forceall exclude each other")

    pat = None
    if (args.list or args.search) and args.cnames:
        pat = re.compile(args.cnames[0], re.I)

    if args.sys_prefix:
        prefix = sys.prefix
    elif args.prefix:
        prefix = args.prefix
    else:
        prefix = config.get('prefix', sys.prefix)

    if prefix == sys.prefix:
        prefixes = [sys.prefix]
    else:
        prefixes = [prefix, sys.prefix]

    if args.env:                                  # --env
        env_option(prefixes)
        return

    if args.log:                                  # --log
        History(prefix).print_log()
        return

    if args.sys_config:                           # --sys-config
        config.get_path = lambda: config.system_config_path

    if args.config:                               # --config
        config.print_config()
        return

    if args.userpass:                             # --userpass
        username, password = config.input_auth()
        config.change_auth(username, password)
        return

    if args.list:                                 # --list
        list_option(prefixes, args.hook, pat)
        return

    if args.proxy:                                # --proxy
        setup_proxy(args.proxy)
    elif config.get('proxy'):
        setup_proxy(config.get('proxy'))
    else:
        setup_proxy()

    dry_run = args.dry_run
    verbose = args.verbose

    if config.get('use_resource_index'):
        from resource import Resources
        res = Resources('http://beta.enthought.com/webservice/',
                        verbose=verbose)
        enst = res.enst
        enst.dry_run = dry_run
        enst.prefixes = prefixes
    else:
        enpkg = Enpkg(config.get('IndexedRepos'), config.get_auth(),
                      prefixes=prefixes, hook=args.hook,
                      verbose=args.verbose)

    if args.imports:                              # --imports
        assert not args.hook
        imports_option(enpkg, pat)
        return

    if args.add_url:                              # --add-url
        add_url(args.add_url, args.verbose)
        return

    if args.revert:                               # --revert
        revert(enst, args.revert)
        return

    if args.search:                               # --search
        search(enpkg, pat)
        return

    if args.info:                                 # --info
        if len(args.cnames) != 1:
            p.error("Option requires one argument (name of package)")
        info_option(enst, canonical(args.cnames[0]))
        return

    if args.whats_new:                            # --whats-new
        whats_new(enst)
        return

    if len(args.cnames) == 0:
        p.error("Requirement(s) missing")
    elif len(args.cnames) == 2:
        pat = re.compile(r'\d+\.\d+')
        if pat.match(args.cnames[1]):
            args.cnames = ['-'.join(args.cnames)]

    reqs = []
    for arg in args.cnames:
        if '-' in arg:
            name, version = arg.split('-', 1)
            reqs.append(Req(name + ' ' + version))
        else:
            reqs.append(Req(arg))

    if verbose:
        print "Requirements:"
        for req in reqs:
            print '    %r' % req
        print

    print "prefix:", prefix

    with History(prefix):
        for req in reqs:
            if args.remove:                           # --remove
                remove_req(enpkg, req)
            else:
                install_req(enpkg, req, args)
Example #26
def main():
    try:
        user_base = site.USER_BASE
    except AttributeError:
        user_base = abs_expanduser('~/.local')

    p = ArgumentParser(description=__doc__)
    p.add_argument('cnames',
                   metavar='NAME',
                   nargs='*',
                   help='package(s) to work on')
    p.add_argument("--add-url",
                   metavar='URL',
                   help="add a repository URL to the configuration file")
    p.add_argument("--config",
                   action="store_true",
                   help="display the configuration and exit")
    p.add_argument('-f',
                   "--force",
                   action="store_true",
                   help="force install the main package "
                   "(not it's dependencies, see --forceall)")
    p.add_argument("--forceall",
                   action="store_true",
                   help="force install of all packages "
                   "(i.e. including dependencies)")
    p.add_argument("--hook",
                   action="store_true",
                   help="don't install into site-packages (experimental)")
    p.add_argument("--imports",
                   action="store_true",
                   help="show which packages can be imported")
    p.add_argument('-i',
                   "--info",
                   action="store_true",
                   help="show information about a package")
    p.add_argument("--log", action="store_true", help="print revision log")
    p.add_argument('-l',
                   "--list",
                   action="store_true",
                   help="list the packages currently installed on the system")
    p.add_argument(
        '-n',
        "--dry-run",
        action="store_true",
        help="show what would have been downloaded/removed/installed")
    p.add_argument('-N',
                   "--no-deps",
                   action="store_true",
                   help="neither download nor install dependencies")
    p.add_argument("--env",
                   action="store_true",
                   help="based on the configuration, display how to set the "
                   "some environment variables")
    p.add_argument("--prefix",
                   metavar='PATH',
                   help="install prefix (disregarding of any settings in "
                   "the config file)")
    p.add_argument("--proxy", metavar='URL', help="use a proxy for downloads")
    p.add_argument("--remove", action="store_true", help="remove a package")
    p.add_argument("--revert",
                   metavar="REV",
                   help="revert to a previous set of packages")
    p.add_argument('-s',
                   "--search",
                   action="store_true",
                   help="search the index in the repo of packages "
                   "and display versions available.")
    p.add_argument("--sys-config",
                   action="store_true",
                   help="use <sys.prefix>/.enstaller4rc (even when "
                   "~/.enstaller4rc exists)")
    p.add_argument("--sys-prefix",
                   action="store_true",
                   help="use sys.prefix as the install prefix")
    p.add_argument("--user",
                   action="store_true",
                   help="install into user prefix, i.e. --prefix=%r" %
                   user_base)
    p.add_argument("--userpass",
                   action="store_true",
                   help="change EPD authentication in configuration file")
    p.add_argument('-v', "--verbose", action="store_true")
    p.add_argument('--version',
                   action="version",
                   version='enstaller version: ' + __version__)
    p.add_argument("--whats-new",
                   action="store_true",
                   help="display to which installed packages updates are "
                   "available")
    args = p.parse_args()

    if len(args.cnames) > 0 and (args.config or args.env or args.userpass
                                 or args.revert or args.log or args.whats_new):
        p.error("Option takes no arguments")

    if args.user:
        args.prefix = user_base

    if args.prefix and args.sys_prefix:
        p.error("Options --prefix and --sys-prefix exclude each other")

    if args.force and args.forceall:
        p.error("Options --force and --forceall exclude each other")

    pat = None
    if (args.list or args.search) and args.cnames:
        pat = re.compile(args.cnames[0], re.I)

    # make prefix
    if args.sys_prefix:
        prefix = sys.prefix
    elif args.prefix:
        prefix = args.prefix
    else:
        prefix = config.get('prefix', sys.prefix)

    # now make prefixes
    if prefix == sys.prefix:
        prefixes = [sys.prefix]
    else:
        prefixes = [prefix, sys.prefix]

    if args.verbose:
        print "Prefixes:"
        for pfx in prefixes:
            print '    %s%s' % (pfx, ['', ' (sys)'][pfx == sys.prefix])
        print

    if args.env:  # --env
        env_option(prefixes)
        return

    if args.log:  # --log
        if args.hook:
            raise NotImplementedError
        from history import History
        h = History(prefix)
        h.update()
        h.print_log()
        return

    if args.sys_config:  # --sys-config
        config.get_path = lambda: config.system_config_path

    if args.list:  # --list
        list_option(prefixes, args.hook, pat)
        return

    if args.proxy:  # --proxy
        setup_proxy(args.proxy)
    elif config.get('proxy'):
        setup_proxy(config.get('proxy'))
    else:
        setup_proxy()

    if 0:  # for testing event manager only
        from encore.events.api import EventManager
        from encore.terminal.api import ProgressDisplay
        evt_mgr = EventManager()
        display = ProgressDisplay(evt_mgr)
    else:
        evt_mgr = None

    if config.get('use_webservice'):
        remote = None  # Enpkg will create the default
    else:
        urls = [fill_url(u) for u in config.get('IndexedRepos')]
        remote = create_joined_store(urls)

    enpkg = Enpkg(remote,
                  prefixes=prefixes,
                  hook=args.hook,
                  evt_mgr=evt_mgr,
                  verbose=args.verbose)

    if args.config:  # --config
        config.print_config(enpkg.remote)
        return

    if args.userpass:  # --userpass
        username, password = config.input_auth()
        config.checked_change_auth(username, password, enpkg.remote)
        return

    if args.dry_run:

        def print_actions(actions):
            for item in actions:
                print '%-8s %s' % item

        enpkg.execute = print_actions

    if args.imports:  # --imports
        assert not args.hook
        imports_option(enpkg, pat)
        return

    if args.add_url:  # --add-url
        add_url(args.add_url, args.verbose)
        return

    if args.revert:  # --revert
        if isfile(args.revert):
            arg = parse_list(args.revert)
        else:
            arg = args.revert
        try:
            actions = enpkg.revert_actions(arg)
            if not actions:
                print "Nothing to do"
                return
            enpkg.execute(actions)
        except EnpkgError as e:
            print e.message
        return

    # Try to auto-update enstaller
    if update_enstaller(enpkg, args):
        print "Enstaller has been updated.", \
            "Please re-run your previous command."
        return

    if args.search:  # --search
        search(enpkg, pat)
        return

    if args.info:  # --info
        if len(args.cnames) != 1:
            p.error("Option requires one argument (name of package)")
        info_option(enpkg, args.cnames[0])
        return

    if args.whats_new:  # --whats-new
        whats_new(enpkg)
        return

    if len(args.cnames) == 0:
        p.error("Requirement(s) missing")
    elif len(args.cnames) == 2:
        pat = re.compile(r'\d+\.\d+')
        if pat.match(args.cnames[1]):
            args.cnames = ['-'.join(args.cnames)]

    reqs = []
    for arg in args.cnames:
        if '-' in arg:
            name, version = arg.split('-', 1)
            reqs.append(Req(name + ' ' + version))
        else:
            reqs.append(Req(arg))

    if args.verbose:
        print "Requirements:"
        for req in reqs:
            print '    %r' % req
        print

    print "prefix:", prefix

    for req in reqs:
        if args.remove:  # --remove
            try:
                enpkg.execute(enpkg.remove_actions(req))
            except EnpkgError as e:
                print e.message
        else:
            install_req(enpkg, req, args)  # install (default)
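The requirement handling at the end of main() follows a small convention: two positional arguments where the second looks like a version are joined with a dash, and any name-version pair is rewritten as a space-separated requirement string. A minimal, self-contained sketch of that logic (with a stand-in Req class, since the real enstaller class is not shown here):

# Illustration only: the cname-to-requirement convention used in main() above,
# with a stand-in Req that just stores the spec string.
import re

class Req(object):
    def __init__(self, spec):
        self.spec = spec
    def __repr__(self):
        return 'Req(%r)' % self.spec

def parse_cnames(cnames):
    if len(cnames) == 2 and re.match(r'\d+\.\d+', cnames[1]):
        cnames = ['-'.join(cnames)]          # e.g. ['numpy', '1.6'] -> ['numpy-1.6']
    reqs = []
    for arg in cnames:
        if '-' in arg:
            name, version = arg.split('-', 1)
            reqs.append(Req(name + ' ' + version))
        else:
            reqs.append(Req(arg))
    return reqs

# parse_cnames(['numpy', '1.6'])   -> [Req('numpy 1.6')]
# parse_cnames(['scipy-0.10.1'])   -> [Req('scipy 0.10.1')]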
Ejemplo n.º 27
0
def main():
    main_parser = argparse.ArgumentParser(prog="cosmicpi", description="CosmicPi acquisition process", add_help=False)
    main_parser.add_argument("--config", help="Path to configuration file", default="/etc/cosmicpi.yaml")
    args, remaining_argv = main_parser.parse_known_args()

    # Merge the default config with the configuration file
    config = load_config(args.config)

    # Parse the command line for overrides
    parser = argparse.ArgumentParser(parents=[main_parser])
    parser.set_defaults(**config)

    parser.add_argument("-i", "--host",       **arg("broker.host",          "Message broker host"))
    parser.add_argument("-p", "--port",       **arg("broker.port",          "Message broker port", type=int))
    parser.add_argument("-a", "--username",   **arg("broker.username",      "Message broker username"))
    parser.add_argument("-b", "--password",   **arg("broker.password",      "Message broker password"))
    parser.add_argument("-n", "--no-publish", **arg("broker.enabled",       "Disable event publication"))
    parser.add_argument("-u", "--usb",        **arg("usb.device",           "USB device name"))
    parser.add_argument("-d", "--debug",      **arg("debug",                "Enable debug mode"))
    parser.add_argument("-o", "--log-config", **arg("logging.config",       "Path to logging configuration"))
    parser.add_argument("-l", "--no-log",     **arg("logging.enabled",      "Disable file logging"))
    parser.add_argument("-v", "--no-vib",     **arg("monitoring.vibration", "Disable vibration monitoring"))
    parser.add_argument("-w", "--no-weather", **arg("monitoring.weather",   "Disable weather monitoring"))
    parser.add_argument("-c", "--no-cosmics", **arg("monitoring.cosmics",   "Disable cosmic ray monitoring"))
    parser.add_argument("-k", "--patk",       **arg("patok",                "Server push notification token"))

    options = parser.parse_args()

    log_config = options.logging["config"]
    print ("INFO: using logging configuration from %s" % log_config)
    logging.config.fileConfig(log_config, disable_existing_loggers=False)
    console = logging.getLogger(__name__)

    if options.debug:
        print_config(options)

    try:
        publisher = EventPublisher(options)
    except Exception:
        console.error("Exception: Can't connect to broker")
        sys.exit(1)

    try:
        usb = UsbHandler(options.usb['device'], 9600, 60)
        usb.open()
    except Exception as e:
        console.error("Exception: Can't open USB device: %s" % e)
        sys.exit(1)

    detector = Detector(usb, publisher, options)

    try:
        detector.start()
        command_handler = CommandHandler(detector, usb, options)
        command_handler.start()

        while True:
            time.sleep(1)

    except Exception as e:
        console.info("Exception: main: %s" % e)
        traceback.print_exc()

    finally:
        detector.stop()
        console.info("Quitting ...")
        time.sleep(1)
        usb.close()
        publisher.close()
        sys.exit(0)
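The `arg()` helper used in the add_argument calls above is not included in this excerpt. As a rough, hypothetical sketch of the idea (the real CosmicPi helper is assumed to do more, e.g. preserve the nested section structure that `options.logging["config"]` and `options.usb['device']` rely on later), it maps a dotted configuration key and a help string to argparse keyword arguments:

# Hypothetical sketch only: resolve a dotted key in the merged config and return
# add_argument keyword arguments with that value as the default.
def make_arg(config):
    def arg(dotted_key, help_text, **kwargs):
        value = config
        for part in dotted_key.split('.'):
            value = value[part]              # walk e.g. config["broker"]["host"]
        return dict(default=value, help=help_text, **kwargs)
    return arg

# arg = make_arg(load_config("/etc/cosmicpi.yaml"))
# parser.add_argument("-p", "--port", **arg("broker.port", "Message broker port", type=int))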
Ejemplo n.º 28
0
def main():
    # Get running configuration
    config, _ = get_config()
    print_config()

    # Build tensorflow graph from config
    print("Building graph...")
    actor = Actor(config)

    # Saver to save & restore all the variables.
    variables_to_save = [
        v for v in tf.global_variables() if 'Adam' not in v.name
    ]
    saver = tf.train.Saver(var_list=variables_to_save,
                           keep_checkpoint_every_n_hours=1.0)

    print("Starting session...")
    with tf.Session() as sess:
        # Run initialize op
        sess.run(tf.global_variables_initializer())

        # Restore variables from disk.
        if config.restore_model == True:
            saver.restore(sess, "save/" + config.restore_from + "/actor.ckpt")
            print("Model restored.")

        # Initialize data generator
        solver = Solver(actor.max_length)  ###### ######
        training_set = DataGenerator(solver)

        # Training mode
        if not config.inference_mode:

            # Summary writer
            writer = tf.summary.FileWriter(config.log_dir, sess.graph)

            print("Starting training...")
            for i in tqdm(range(config.nb_epoch)):
                # Get feed dict
                input_batch = training_set.train_batch(actor.batch_size,
                                                       actor.max_length,
                                                       actor.input_dimension)
                feed = {actor.input_: input_batch}
                if i % 5 == 0:

                    sess.run(actor.assign_op, feed_dict=feed)

                # Forward pass & train step
                summary, base_op, train_step1, train_step2 = sess.run(
                    [
                        actor.merged, actor.base_op, actor.train_step1,
                        actor.train_step2
                    ],
                    feed_dict=feed)

                if i % 100 == 0:
                    writer.add_summary(summary, i)

                # Save the variables to disk
                if i % max(1, int(config.nb_epoch / 5)) == 0 and i != 0:
                    save_path = saver.save(sess,
                                           "save/" + config.save_to +
                                           "/tmp.ckpt",
                                           global_step=i)
                    print("\n Model saved in file: %s" % save_path)

            print("Training COMPLETED !")
            saver.save(sess, "save/" + config.save_to + "/actor.ckpt")

        # Inference mode
        else:

            targets = []
            predictions = []

            for __ in tqdm(range(1000)):  # num of examples

                # Get feed_dict (single input)
                seed_ = 1 + __
                input_batch, or_sequence = training_set.test_batch(
                    actor.batch_size,
                    actor.max_length,
                    actor.input_dimension,
                    seed=seed_)  # seed=0 means None
                feed = {actor.input_: input_batch}

                # Solve instance (OR tools)
                opt_trip, opt_length = training_set.solve_instance(or_sequence)
                targets.append(opt_length / 100)
                #print('\n Optimal length:',opt_length/100)

                ################################### UMPA LOOOOP HERE ###################################    nb_loop / temperature

                # Sample solutions
                permutations, circuit_length = sess.run(
                    [actor.positions, actor.distances], feed_dict=feed)
                #training_set.visualize_sampling(permutations)

                # Find best solution
                #print(circuit_length)
                j = np.argmin(circuit_length)
                best_permutation = permutations[j][:-1]
                predictions.append(circuit_length[j])

                ################################### UMPA LOOOOP HERE ###################################

                #print('\n Best tour length:',circuit_length[j])
                #print(' * permutation:', best_permutation)

                # plot corresponding tour
                #training_set.visualize_2D_trip(opt_trip)
                #training_set.visualize_2D_trip(or_sequence[best_permutation])

            predictions = np.asarray(predictions)
            targets = np.asarray(targets)

            print(' Mean length:', np.mean(predictions))
            ratio = np.asarray(predictions) / np.asarray(targets)
            print('\n Average deviation: \n', np.mean(ratio))

            n, bins, patches = plt.hist(ratio, 50, facecolor='r', alpha=0.75)

            plt.xlabel('Prediction/target')
            plt.ylabel('Counts')
            plt.title('Comparison to Google OR tools')
            plt.axis([0.9, 1.4, 0, 500])
            plt.grid(True)
            plt.show()
Ejemplo n.º 29
0
                        type=str,
                        default='true',
                        help='whether to train or not')
    parser.add_argument('-dl',
                        '--delete_log',
                        type=str,
                        default='false',
                        help='whether to delete existing logs or not')
    parser.add_argument('-l',
                        '--loss',
                        type=str,
                        default='image',
                        help='loss mode')
    args = parser.parse_args()

    config = get_config(args.mode)
    config.is_train = args.is_train
    config.delete_log = args.delete_log
    config.loss = args.loss

    print(toGreen('Loading Config...'))
    print_config(config)

    is_train = to_bool(args.is_train)
    handle_directory(config, to_bool(args.delete_log))

    if is_train:
        train(config)
    else:
        tensorOutput = estimate(tensorFirst, tensorSecond)
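The example above keeps --is_train and --delete_log as 'true'/'false' strings and converts them with to_bool(). A minimal sketch of such a helper (an assumption; the project's own implementation is not shown):

# Hypothetical sketch of the to_bool() helper assumed above: accepts the usual
# true/false spellings passed on the command line as strings.
def to_bool(value):
    if isinstance(value, bool):
        return value
    value = str(value).strip().lower()
    if value in ('true', '1', 'yes', 'y'):
        return True
    if value in ('false', '0', 'no', 'n'):
        return False
    raise ValueError('cannot interpret %r as a boolean' % value)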
Ejemplo n.º 30
0
def main():
    try:
        user_base = site.USER_BASE
    except AttributeError:
        user_base = abs_expanduser('~/.local')

    p = ArgumentParser(description=__doc__)
    p.add_argument('cnames', metavar='NAME', nargs='*',
                   help='package(s) to work on')
    p.add_argument("--add-url", metavar='URL',
                   help="add a repository URL to the configuration file")
    p.add_argument("--config", action="store_true",
                   help="display the configuration and exit")
    p.add_argument('-f', "--force", action="store_true",
                   help="force install the main package "
                        "(not it's dependencies, see --forceall)")
    p.add_argument("--forceall", action="store_true",
                   help="force install of all packages "
                        "(i.e. including dependencies)")
    p.add_argument("--hook", action="store_true",
                   help="don't install into site-packages (experimental)")
    p.add_argument("--imports", action="store_true",
                   help="show which packages can be imported")
    p.add_argument('-i', "--info", action="store_true",
                   help="show information about a package")
    p.add_argument("--log", action="store_true", help="print revision log")
    p.add_argument('-l', "--list", action="store_true",
                   help="list the packages currently installed on the system")
    p.add_argument('-n', "--dry-run", action="store_true",
               help="show what would have been downloaded/removed/installed")
    p.add_argument('-N', "--no-deps", action="store_true",
                   help="neither download nor install dependencies")
    p.add_argument("--env", action="store_true",
                   help="based on the configuration, display how to set the "
                        "some environment variables")
    p.add_argument("--prefix", metavar='PATH',
                   help="install prefix (disregarding of any settings in "
                        "the config file)")
    p.add_argument("--proxy", metavar='URL', help="use a proxy for downloads")
    p.add_argument("--remove", action="store_true", help="remove a package")
    p.add_argument("--revert", metavar="REV",
                   help="revert to a previous set of packages")
    p.add_argument('-s', "--search", action="store_true",
                   help="search the index in the repo of packages "
                        "and display versions available.")
    p.add_argument("--sys-config", action="store_true",
                   help="use <sys.prefix>/.enstaller4rc (even when "
                        "~/.enstaller4rc exists)")
    p.add_argument("--sys-prefix", action="store_true",
                   help="use sys.prefix as the install prefix")
    p.add_argument("--user", action="store_true",
               help="install into user prefix, i.e. --prefix=%r" % user_base)
    p.add_argument("--userpass", action="store_true",
                   help="change EPD authentication in configuration file")
    p.add_argument('-v', "--verbose", action="store_true")
    p.add_argument('--version', action="version",
                   version='enstaller version: ' + __version__)
    p.add_argument("--whats-new", action="store_true",
                   help="display to which installed packages updates are "
                        "available")
    args = p.parse_args()

    if len(args.cnames) > 0 and (args.config or args.env or args.userpass or
                                 args.revert or args.log or args.whats_new):
        p.error("Option takes no arguments")

    if args.user:
        args.prefix = user_base

    if args.prefix and args.sys_prefix:
        p.error("Options --prefix and --sys-prefix exclude each ohter")

    if args.force and args.forceall:
        p.error("Options --force and --forceall exclude each ohter")

    pat = None
    if (args.list or args.search) and args.cnames:
        pat = re.compile(args.cnames[0], re.I)

    # make prefix
    if args.sys_prefix:
        prefix = sys.prefix
    elif args.prefix:
        prefix = args.prefix
    else:
        prefix = config.get('prefix', sys.prefix)

    # now make prefixes
    if prefix == sys.prefix:
        prefixes = [sys.prefix]
    else:
        prefixes = [prefix, sys.prefix]

    if args.verbose:
        print "Prefixes:"
        for pfx in prefixes:
            print '    %s%s' % (pfx, ['', ' (sys)'][pfx == sys.prefix])
        print

    if args.env:                                  # --env
        env_option(prefixes)
        return

    if args.log:                                  # --log
        if args.hook:
            raise NotImplementedError
        from history import History
        h = History(prefix)
        h.update()
        h.print_log()
        return

    if args.sys_config:                           # --sys-config
        config.get_path = lambda: config.system_config_path

    if args.config:                               # --config
        config.print_config()
        return

    if args.list:                                 # --list
        list_option(prefixes, args.hook, pat)
        return

    if args.proxy:                                # --proxy
        setup_proxy(args.proxy)
    elif config.get('proxy'):
        setup_proxy(config.get('proxy'))
    else:
        setup_proxy()

    if 0: # for testing event manager only
        from encore.events.api import EventManager
        from encore.terminal.api import ProgressDisplay
        evt_mgr = EventManager()
        display = ProgressDisplay(evt_mgr)
    else:
        evt_mgr = None

    if config.get('use_webservice'):
        remote = None # Enpkg will create the default
    else:
        urls = [fill_url(u) for u in config.get('IndexedRepos')]
        remote = create_joined_store(urls)

    enpkg = Enpkg(remote, prefixes=prefixes, hook=args.hook,
                  evt_mgr=evt_mgr, verbose=args.verbose)

    if args.userpass:                             # --userpass
        auth = username, password = config.input_auth()
        if remote is not None:
            try:
                print 'Verifying username and password...'
                remote.connect(auth)
            except KeyError as e:
                print 'Invalid Username or Password'
            except Exception as e:
                print e.message
            else:
                config.change_auth(username, password)
        else:
            config.change_auth(username, password)
        return

    if args.dry_run:
        def print_actions(actions):
            for item in actions:
                print '%-8s %s' % item
        enpkg.execute = print_actions

    if args.imports:                              # --imports
        assert not args.hook
        imports_option(enpkg, pat)
        return

    if args.add_url:                              # --add-url
        add_url(args.add_url, args.verbose)
        return

    if args.revert:                               # --revert
        if isfile(args.revert):
            arg = parse_list(args.revert)
        else:
            arg = args.revert
        try:
            actions = enpkg.revert_actions(arg)
            if not actions:
                print "Nothing to do"
                return
            enpkg.execute(actions)
        except EnpkgError as e:
            print e.message
        return

    if args.search:                               # --search
        search(enpkg, pat)
        return

    if args.info:                                 # --info
        if len(args.cnames) != 1:
            p.error("Option requires one argument (name of package)")
        info_option(enpkg, args.cnames[0])
        return

    if args.whats_new:                            # --whats-new
        whats_new(enpkg)
        return

    if len(args.cnames) == 0:
        p.error("Requirement(s) missing")
    elif len(args.cnames) == 2:
        pat = re.compile(r'\d+\.\d+')
        if pat.match(args.cnames[1]):
            args.cnames = ['-'.join(args.cnames)]

    reqs = []
    for arg in args.cnames:
        if '-' in arg:
            name, version = arg.split('-', 1)
            reqs.append(Req(name + ' ' + version))
        else:
            reqs.append(Req(arg))

    if args.verbose:
        print "Requirements:"
        for req in reqs:
            print '    %r' % req
        print

    print "prefix:", prefix

    for req in reqs:
        if args.remove:                               # --remove
            try:
                enpkg.execute(enpkg.remove_actions(req))
            except EnpkgError as e:
                print e.message
        else:
            install_req(enpkg, req, args)             # install (default)
Ejemplo n.º 31
0
def main():
    cfg.configure(args)
    cfg.print_config()
    torch.set_num_threads(args.omp_cores)
    torch.manual_seed(args.seed)

    model = ising.ISING_C4V(hx=args.hx, q=args.q)
    energy_f = model.energy_1x1_nn if args.q == 0 else model.energy_1x1_plaqette

    # initialize an ipeps
    if args.instate != None:
        state = read_ipeps_c4v(args.instate)
        if args.bond_dim > max(state.get_aux_bond_dims()):
            # extend the auxiliary dimensions
            state = extend_bond_dim(state, args.bond_dim)
        state.add_noise(args.instate_noise)
        state.sites[(0, 0)] = state.sites[(0, 0)] / torch.max(
            torch.abs(state.sites[(0, 0)]))
    elif args.opt_resume is not None:
        state = IPEPS_C4V(torch.tensor(0.))
        state.load_checkpoint(args.opt_resume)
    elif args.ipeps_init_type == 'RANDOM':
        bond_dim = args.bond_dim
        A= torch.rand((model.phys_dim, bond_dim, bond_dim, bond_dim, bond_dim),\
            dtype=cfg.global_args.dtype,device=cfg.global_args.device)
        # A= make_c4v_symm(A)
        # A= A/torch.max(torch.abs(A))
        A = A / A.norm()
        state = IPEPS_C4V(A)
    else:
        raise ValueError("Missing trial state: -instate=None and -ipeps_init_type= "\
            +str(args.ipeps_init_type)+" is not supported")

    print(state)

    @torch.no_grad()
    def ctmrg_conv_energy(state, env, history, ctm_args=cfg.ctm_args):
        if not history:
            history = []
        e_curr = energy_f(state, env)
        history.append(e_curr.item())
        if (len(history) > 1 and abs(history[-1]-history[-2]) < ctm_args.ctm_conv_tol)\
            or len(history) >= ctm_args.ctm_max_iter:
            log.info({"history_length": len(history), "history": history})
            return True, history
        return False, history

    @torch.no_grad()
    def ctmrg_conv_rdm2x1(state, env, history, ctm_args=cfg.ctm_args):
        if not history:
            history = dict({"log": []})
        rdm2x1 = rdm2x1_sl(state, env, force_cpu=ctm_args.conv_check_cpu)
        dist = float('inf')
        if len(history["log"]) > 0:
            dist = torch.dist(rdm2x1, history["rdm"], p=2).item()
        history["rdm"] = rdm2x1
        history["log"].append(dist)
        if dist < ctm_args.ctm_conv_tol or len(
                history["log"]) >= ctm_args.ctm_max_iter:
            log.info({
                "history_length": len(history['log']),
                "history": history['log']
            })
            return True, history
        return False, history

    state_sym = to_ipeps_c4v(state)
    ctm_env = ENV_C4V(args.chi, state_sym)
    init_env(state_sym, ctm_env)

    ctm_env, *ctm_log = ctmrg_c4v.run(state_sym,
                                      ctm_env,
                                      conv_check=ctmrg_conv_rdm2x1)
    loss = energy_f(state_sym, ctm_env)
    obs_values, obs_labels = model.eval_obs(state_sym, ctm_env)
    print(", ".join(["epoch", "energy"] + obs_labels))
    print(", ".join([f"{-1}", f"{loss}"] + [f"{v}" for v in obs_values]))

    def loss_fn(state, ctm_env_in, opt_context):
        ctm_args = opt_context["ctm_args"]
        opt_args = opt_context["opt_args"]
        # create a copy of the state, then symmetrize and normalize it with all
        # operations tracked. This does not overwrite the parameter tensors, which
        # live outside the scope of loss_fn
        state_sym = to_ipeps_c4v(state, normalize=True)

        # possibly re-initialize the environment
        if cfg.opt_args.opt_ctm_reinit:
            init_env(state_sym, ctm_env_in)

        # 1) compute environment by CTMRG
        ctm_env_out, *ctm_log = ctmrg_c4v.run(state_sym,
                                              ctm_env_in,
                                              conv_check=ctmrg_conv_rdm2x1,
                                              ctm_args=ctm_args)
        loss = energy_f(state_sym, ctm_env_out)

        return (loss, ctm_env_out, *ctm_log)

    def _to_json(l):
        re = [l[i, 0].item() for i in range(l.size()[0])]
        im = [l[i, 1].item() for i in range(l.size()[0])]
        return dict({"re": re, "im": im})

    @torch.no_grad()
    def obs_fn(state, ctm_env, opt_context):
        if ("line_search" in opt_context.keys() and not opt_context["line_search"]) \
            or not "line_search" in opt_context.keys():
            state_sym = to_ipeps_c4v(state, normalize=True)
            epoch = len(opt_context["loss_history"]["loss"])
            loss = opt_context["loss_history"]["loss"][-1]
            obs_values, obs_labels = model.eval_obs(state_sym, ctm_env)
            print(", ".join([f"{epoch}",f"{loss}"]+[f"{v}" for v in obs_values]\
                + [f"{state.site().norm()}"]))

            if args.top_freq > 0 and epoch % args.top_freq == 0:
                coord_dir_pairs = [((0, 0), (1, 0))]
                for c, d in coord_dir_pairs:
                    # transfer operator spectrum
                    print(f"TOP spectrum(T)[{c},{d}] ", end="")
                    l = transferops_c4v.get_Top_spec_c4v(
                        args.top_n, state_sym, ctm_env)
                    print("TOP " + json.dumps(_to_json(l)))

    # optimize
    optimize_state(state, ctm_env, loss_fn, obs_fn=obs_fn)

    # compute final observables for the best variational state
    outputstatefile = args.out_prefix + "_state.json"
    state = read_ipeps_c4v(outputstatefile)
    ctm_env = ENV_C4V(args.chi, state)
    init_env(state, ctm_env)
    ctm_env, *ctm_log = ctmrg_c4v.run(state,
                                      ctm_env,
                                      conv_check=ctmrg_conv_energy)
    opt_energy = energy_f(state, ctm_env)
    obs_values, obs_labels = model.eval_obs(state, ctm_env)
    print(", ".join([f"{args.opt_max_iter}", f"{opt_energy}"] +
                    [f"{v}" for v in obs_values]))
Ejemplo n.º 32
0
def train_agent(experiments_dir='./experiments'):
    logging.basicConfig(level=logging.INFO, stream=sys.stdout, format='')
    print_config()

    dataset = load_dataset(CONFIG['dataset'], CONFIG['dataset_path'])
    env = create_env(dataset, CONFIG)
    agent = create_agent(env, CONFIG)

    # Seeding for reproducible experiments
    set_random_seed(CONFIG['seed_agent'], gpus=[CONFIG['gpu_id']])
    env.seed(CONFIG['seed_environment'])

    # Prepare experiment directory
    now_date = datetime.datetime.now()
    timestr = now_date.strftime("%Y-%m-%d+%H-%M-%S")
    experiment_path = os.path.join(experiments_dir,
                                   CONFIG['experiment_id'] + "_" + timestr)
    ensure_folder(experiment_path)
    write_config(experiment_path)

    step_hooks = []
    logger = None

    if CONFIG['use_tensorboard']:
        tensorboard_path = os.path.join(experiment_path, "tensorboard")
        ensure_folder(tensorboard_path)
        eval_run_count = 10
        writer = SummaryWriter(tensorboard_path)
        step_hooks = [TensorBoardLoggingStepHook(writer)]
        handler = TensorBoardEvaluationLoggingHandler(writer, agent,
                                                      eval_run_count)
        logger = logging.getLogger()
        logger.addHandler(handler)

    # Inject hook for recording custom stats during training
    record_stats = chainerrl.experiments.evaluator.record_stats
    chainerrl.experiments.evaluator.record_stats = create_stats_decorator(env)(
        record_stats)

    train_agent_with_evaluation(
        agent,
        env,
        steps=CONFIG['steps'],  # Total number of training steps
        eval_n_episodes=CONFIG['eval_n_episodes'],  # Episodes sampled per evaluation
        eval_n_steps=None,
        train_max_episode_len=CONFIG['train_max_episode_len'],  # Maximum length of each episode
        eval_interval=CONFIG['eval_interval'],  # Evaluate the agent every this many steps
        outdir=experiment_path,  # Save everything to the experiment directory
        step_hooks=step_hooks,
        logger=logger)

    # Save the final model
    agent_classname = agent.__class__.__name__[:10]
    agent_path = os.path.join(experiment_path, "agent" + "_" + agent_classname)
    ensure_folder(agent_path)
    agent.save(agent_path)

    # Plot training summary
    if not os.path.exists(os.path.join(experiment_path, 'training')):
        plot_training_summary(experiment_path)

    return experiment_path
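The helpers ensure_folder() and write_config() used above are not part of this excerpt; minimal hypothetical sketches (names, signatures, and file format are assumptions):

# Hypothetical sketches of the directory/config helpers assumed above.
import json
import os

def ensure_folder(path):
    # Create the directory (and any parents) if it does not already exist.
    os.makedirs(path, exist_ok=True)

def write_config(experiment_path, config=None):
    # Persist the configuration used for this run next to its outputs.
    with open(os.path.join(experiment_path, 'config.json'), 'w') as fo:
        json.dump(config or {}, fo, indent=2, default=str)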




if __name__ == "__main__":
    # get config
    config, _ = get_config()

    # Build Model and Reward from config
    actor = Actor(config)

    print("Starting training...")
    with tf.Session() as sess:
        tf.global_variables_initializer().run()
        print_config()

        solver = [] #Solver(actor.max_length)
        training_set = DataGenerator(solver)

        nb_epoch=2
        for i in tqdm(range(nb_epoch)): # epoch i

            # Get feed_dict
            input_batch  = training_set.train_batch(actor.batch_size, actor.max_length, actor.input_dimension)
            feed = {actor.input_: input_batch}
            #print(' Input \n', input_batch)

            permutation, distances = sess.run([actor.positions, actor.distances], feed_dict=feed) 
            print(' Permutation \n',permutation)
            print(' Tour length \n',distances)
Ejemplo n.º 34
0
def main():
    # Get running configuration
    config, _ = get_config()
    print_config()


    # Build tensorflow graph from config
    print("Building graph...")
    actor = Actor(config)

    # Creating dataset
    if not config.inference_mode:
        l = []
        for i in range(config.nCells):
            for j in range(config.nMuts):
                l.append([i,j])
        l = np.asarray(l)

    
    # Saver to save & restore all the variables.
    variables_to_save = [v for v in tf.global_variables() if 'Adam' not in v.name]
    saver = tf.train.Saver(var_list=variables_to_save, keep_checkpoint_every_n_hours=1.0, max_to_keep= 1000)  

    print("Starting session...")
    with tf.Session() as sess:
        # Run initialize op
        sess.run(tf.global_variables_initializer())

        # Training mode
        if not config.inference_mode:

            dataset = data(config.nb_epoch*config.batch_size, config.nCells, config.nMuts, config.ms_dir, config.alpha, config.beta)
            print('Dataset was created!')
            matrices_p, matrices_n = dataset

            print("Starting training...")
            for i in tqdm(range(config.nb_epoch)): 
             
                feed = {actor.input_: train_batch(config, np.asarray(matrices_n), l, i)}

                # Forward pass & train step
                summary, train_step1, train_step2 = sess.run([actor.merged, actor.train_step1, actor.train_step2], feed_dict=feed)

            print("Training COMPLETED !")
            saver.save(sess, config.save_to + "/actor.ckpt")
        # Inference mode
        else:

            dataset = data(config.nTestMats, config.nCells, config.nMuts, config.ms_dir, config.alpha, config.beta)
            print('Dataset was created!')
            matrices_p, matrices_n = dataset
            matrices_n_t = np.asarray(matrices_n)
            matrices_p_t = np.asarray(matrices_p)
            nMats = np.shape(matrices_n_t)[0]

            saver.restore(sess, config.restore_from + "/actor.ckpt")
            print("Model restored.")
            
            V_o = np.zeros((nMats, 1), dtype = np.float64)
            f_1_to_0_o = np.zeros((nMats, 1), dtype = np.float64)
            f_0_to_1_o = np.zeros((nMats, 1), dtype = np.float64)
            N00_o = np.zeros((nMats, 1), dtype = np.float64)
            N11_o = np.zeros((nMats, 1), dtype = np.float64)            
            N00_NLL_o = np.zeros((nMats, 1), dtype = np.float64)
            N11_NLL_o = np.zeros((nMats, 1), dtype = np.float64)
            N10_NLL_o = np.zeros((nMats, 1), dtype = np.float64)
            N01_NLL_o = np.zeros((nMats, 1), dtype = np.float64)
            NLL_o = np.zeros((nMats, 1), dtype = np.float64)
            V_o = np.zeros((nMats, 1), dtype = np.float64)
            
            
            fp_fn = np.zeros((nMats, config.nCells, config.nMuts), dtype = np.float32)
            for k in range(np.shape(matrices_n_t)[0]):
                
                fp_fn[k, matrices_n_t[k,:,:] == 1] = config.alpha
                fp_fn[k, matrices_n_t[k,:,:] == 0] = config.beta
                
                
                N01_o_ = np.sum(matrices_n_t[k,:,:] - matrices_p_t[k,:,:] == -1) 
                N10_o_ = np.sum(matrices_p_t[k,:,:] - matrices_n_t[k,:,:] == -1)
                N11_o_ = np.sum(matrices_p_t[k,:,:] + matrices_n_t[k,:,:] == 2)
                N00_o_ = np.sum(matrices_p_t[k,:,:] - matrices_n_t[k,:,:] == 0) - N11_o_
                
                f_1_to_0_o[k, 0] = N10_o_
                f_0_to_1_o[k, 0] = N01_o_
                # fp_o = config.alpha
                # fn_o = config.beta
                

                N00_o[k, 0] = N00_o_
                N11_o[k, 0] = N11_o_
                N00_NLL_o[k, 0] = N00_o_*np.log(1/(1-config.beta))
                N11_NLL_o[k, 0] = N11_o_*np.log(1/(1-config.alpha))
                N01_NLL_o[k, 0] = N01_o_*np.log(1/config.beta)
                N10_NLL_o[k, 0] = N10_o_*np.log(1/config.alpha)
                NLL_o[k, 0] = np.sum([N00_NLL_o[k, 0], N11_NLL_o[k, 0], N01_NLL_o[k, 0], N10_NLL_o[k, 0]])
                
                k += 1
                     
            l = []
            for i in range(config.nCells):
                for j in range(config.nMuts):
                    l.append([i,j])
            l = np.asarray(l)
            max_length = config.nCells * config.nMuts
            a = np.expand_dims(matrices_n_t.reshape(-1, actor.max_length),2)
            b = np.expand_dims(fp_fn.reshape(-1, actor.max_length),2)
            x = np.tile(l,(nMats,1,1))
            c = np.squeeze(np.concatenate([x,b,a], axis = 2))
            d = np.asarray([np.take(c[i,:,:],np.random.permutation(c[i,:,:].shape[0]),axis=0,out=c[i,:,:]) for i in range(np.shape(c)[0])])
            
            output_ = np.zeros((nMats, 14), dtype = np.float64)
            for j in tqdm(range(nMats)): # num of examples
                start_t = time()

                input_batch = np.tile(d[j,:,:],(actor.batch_size,1,1))
                
                feed = {actor.input_: input_batch}

                
                pos  = sess.run([actor.positions] , feed_dict=feed)[0]


                inp_ = tf.convert_to_tensor(input_batch, dtype=tf.float32)
                pos =  tf.convert_to_tensor(pos, dtype=tf.int32)

                
                r = tf.range(start = 0, limit = actor.batch_size, delta = 1)
                r = tf.expand_dims(r ,1)
                r = tf.expand_dims(r ,2)
                r3 = tf.cast(tf.ones([actor.max_length , 1]) * tf.cast(r, tf.float32), tf.int32)
                r4 = tf.squeeze(r, axis = 2)
                i = 0
                while i < int(max_length/10):    
                    r5 = tf.expand_dims(tf.fill([actor.batch_size], i), axis = 1)
                    u = tf.ones_like(r5)
                    r4_r5 = tf.concat([r4, r5], axis = 1)

                    pos_mask = tf.squeeze(tf.scatter_nd(indices = r4_r5, updates = u, shape = [actor.batch_size, actor.max_length, 1]), axis = 2)

                    pos_mask_cum1 = tf.cumsum(pos_mask, reverse = True, exclusive = True, axis = 1)
                    pos_mask_cum2 = tf.cumsum(pos_mask, reverse = False, exclusive = False, axis = 1) # for calculating NLL

                    per_pos = tf.concat([r3, tf.expand_dims(pos, axis = 2)], axis = 2)

                    per_ = tf.gather_nd(inp_, indices = per_pos)
            
                    per_matrix = per_[:,:,3:4]

                    # flipping the input
                    m1 = tf.multiply(tf.squeeze(per_matrix, axis = 2), tf.cast(pos_mask_cum1, tf.float32))
                    m1 = tf.subtract(tf.cast(pos_mask_cum1, tf.float32) , m1)
                    m2 = tf.multiply(tf.squeeze(per_matrix, axis = 2), tf.cast(pos_mask_cum2, tf.float32))
                    T_f = tf.add(m1, m2)

                    per_flipped = tf.concat([per_[:,:,0:3], tf.expand_dims(T_f, axis = 2)], axis = 2)
                    idx = tf.concat([r3, tf.cast(per_flipped[:,:,0:2], tf.int32)], axis = 2)
                    m_f = tf.scatter_nd(indices = tf.expand_dims(idx,2), updates = per_flipped[:,:,3:4], shape = tf.constant([actor.batch_size, actor.config.nCells, actor.config.nMuts]))           
                    c_v = actor.count3gametes(m_f) # cost for flipped matrix
                    V_rl = c_v.eval()
                    g = np.min(V_rl)
                    
                    # Calculating NLL
                    per_fp_fn = per_[:,:,2:3]
                    per_fp_fn_log = tf.log(1/per_fp_fn) # for N01 and N10
                    per_fp_fn_com = tf.subtract(tf.ones_like(per_fp_fn), per_fp_fn) # for N00 and N11
                    per_fp_fn_com_log = tf.log(1/per_fp_fn_com)

                    NLL_N10_N01 = tf.reduce_sum(tf.multiply(tf.squeeze(per_fp_fn_log, axis = 2), tf.cast(pos_mask_cum1, tf.float32)), axis = 1, keepdims = True)

                    per_matrix_mul_cum2 = tf.multiply(tf.squeeze(per_[:,:,3:4], axis = 2), tf.cast(pos_mask_cum2, tf.float32))
                    N11 = tf.reduce_sum(per_matrix_mul_cum2, axis = 1, keepdims = True)
                    N11_rl = tf.squeeze(N11, axis = 1).eval()
                    sum_mask_cum2 = tf.reduce_sum(tf.cast(pos_mask_cum2, tf.float32), axis = 1, keepdims = True )
                    N00 = tf.subtract(sum_mask_cum2, N11)
                    N00_rl = tf.squeeze(N00, axis = 1).eval()

                    sum_per_matrix = tf.reduce_sum(tf.squeeze(per_matrix, axis = 2) , axis = 1)
                    sum_per_fp =  tf.reduce_sum(tf.squeeze(tf.multiply(per_fp_fn, per_matrix) , axis = 2) , axis = 1)
                    fp = tf.divide(sum_per_fp, sum_per_matrix)
                    fp_r = fp.eval()

                    sum_per_fn = tf.subtract(tf.reduce_sum(tf.squeeze(per_fp_fn, axis = 2), axis = 1), sum_per_fp)
                    q = tf.cast(tf.tile(tf.constant([actor.max_length]), tf.constant([actor.batch_size])), tf.float32)
                    fn = tf.divide(sum_per_fn, tf.subtract(q, sum_per_matrix) )
                    fn_r = fn.eval()

                    fp_com = tf.log(1/tf.subtract(tf.cast(tf.tile(tf.constant([1]), tf.constant([actor.batch_size])), tf.float32), fp))
                    fn_com = tf.log(1/tf.subtract(tf.cast(tf.tile(tf.constant([1]), tf.constant([actor.batch_size])), tf.float32), fn))

                    N00_NLL = tf.multiply(tf.expand_dims(fp_com, axis = 1), N00)
                    N11_NLL = tf.multiply(tf.expand_dims(fn_com, axis = 1), N11)

                    NLL = tf.scalar_mul(config.gamma, tf.add_n([NLL_N10_N01, N00_NLL, N11_NLL ]))            
                    NLL_rl = tf.squeeze(NLL, axis =1).eval()
                    
                    g_w = np.where(V_rl == g)[0]
                    g_w_nll = np.argmin(NLL_rl[g_w])
                    gg = g_w[g_w_nll]    

                    
                    if g == 0:
                        c_v_rl = V_rl[gg]
                        m_rl = m_f.eval()[gg]                    
                        N10 = tf.reduce_sum(tf.multiply(tf.squeeze(per_matrix, axis = 2), tf.cast(pos_mask_cum1, tf.float32)), axis = 1, keepdims = True)
                        f_1_to_0_rl = tf.squeeze(N10, axis = 1)[gg].eval()
                        sum_mask_cum1 = tf.reduce_sum(tf.cast(pos_mask_cum1, tf.float32), axis = 1, keepdims = True )
                        N01 = tf.subtract(sum_mask_cum1, N10)
                        f_0_to_1_rl = tf.squeeze(N01, axis = 1)[gg].eval()
                        n_f = copy.deepcopy(i)
                        
                        # cost of original
                        idx = tf.concat([r3, tf.cast(inp_[:,:,0:2], tf.int32)], axis = 2)
                        m = tf.scatter_nd(indices = tf.expand_dims(idx,2), updates = inp_[:,:,3:4], shape = tf.constant([actor.batch_size, actor.config.nCells, actor.config.nMuts]))
                        c_v_o = actor.count3gametes(m)
                        c_n = c_v_o[0].eval()
                        fp_v = fp_r[gg]
                        fn_v = fn_r[gg]
                        c2 = copy.deepcopy(NLL_rl[gg])
                        
                        df = pd.DataFrame(m_rl.astype(int) , index = ['cell' + str(k1) for k1 in range(np.shape(m_rl)[0])], \
                                          columns = ['mut' + str(h1) for h1 in range(np.shape(m_rl)[1])])
                        df.index.rename('cellID/mutID', inplace=True)
                        df.to_csv(config.output_dir + '/mrl_{}.txt'.format(j + 1), sep='\t')
                        break
                        
                    c_t = tf.add(tf.squeeze(NLL, axis = 1), tf.cast(c_v, tf.float32))
                    
                    if i == 0:
                        c2 = copy.deepcopy(NLL_rl[gg])
                        c_v_rl = V_rl[gg]
                        n_f = copy.deepcopy(i)
                        f_0_to_1_rl = 0
                        f_1_to_0_rl = 0
                        m_rl = m_f.eval()[gg]
                        fp_v = fp_r[gg]
                        fn_v = fn_r[gg]
                        g1 = copy.deepcopy(g)
                        
                    if g1 > g: #c2 > NLL_rl[gg]:
                        c2 = copy.deepcopy(NLL_rl[gg])
                        c_v_rl = V_rl[gg]
                        n_f = copy.deepcopy(i)
                        f_0_to_1_rl = tf.squeeze(N01, axis = 1)[gg].eval()
                        f_1_to_0_rl = tf.squeeze(N10, axis = 1)[gg].eval()
                        m_rl = m_f.eval()[gg] 
                        fp_v = fp_r[gg]
                        fn_v = fn_r[gg]
                        g1 = copy.deepcopy(g)
                     
                    if i == int(max_length/10) - 1: 
                        # cost of original
                        idx = tf.concat([r3, tf.cast(inp_[:,:,0:2], tf.int32)], axis = 2)
                        m = tf.scatter_nd(indices = tf.expand_dims(idx,2), updates = inp_[:,:,3:4], shape = tf.constant([actor.batch_size, actor.config.nCells, actor.config.nMuts]))
                        c_v_o = actor.count3gametes(m)
                        c_n = c_v_o[0].eval()
                        df = pd.DataFrame(m_rl.astype(int) , index = ['cell' + str(k1) for k1 in range(np.shape(m_rl)[0])], \
                                          columns = ['mut' + str(h1) for h1 in range(np.shape(m_rl)[1])])
                        df.index.rename('cellID/mutID', inplace=True)
                        df.to_csv(config.output_dir + '/mrl_{}.txt'.format(j + 1), sep='\t') 
                    i += 1  
                dur_t = time() - start_t

                output_[j,0] = fp_v
                output_[j,1] = fn_v 
                output_[j,2] = c2  # cost (NLL part)
                output_[j,3] = c_v_rl  # cost (violation part)
                output_[j,4] = c_n # number of violations  for noisy matrix
                output_[j,5] = n_f # total number of flips based on rl
                output_[j,6] = f_0_to_1_rl
                output_[j,7] = f_1_to_0_rl
                output_[j,8] = dur_t
                # output_[j,9] = s_m[j]
                    
                    
                    
            output_[:,9] = np.squeeze(N00_o)
            output_[:,10] = np.squeeze(N11_o)
            output_[:,11] = np.squeeze(NLL_o)
            output_[:,12] = np.squeeze(f_1_to_0_o)
            output_[:,13] = np.squeeze(f_0_to_1_o)
            
            df = pd.DataFrame(output_, index = ["test" + str(k) for k in range(nMats)], \
                             columns = ["fp", "fn","NLL_rl", "V_rl", "V_o", "n_f", "f_0_to_1_rl", "f_1_to_0_rl",\
                                        "time", "N00_o", "N11_o", "NLL_o", "f_1_to_0_o", "f_0_to_1_o"])
            df.to_csv(config.output_dir + '/test_{nCells}x{nMuts}.csv'.format(nCells = config.nCells, nMuts = config.nMuts), sep = ',')
Ejemplo n.º 35
0
def main():
    cfg.configure(args)
    cfg.print_config()
    torch.set_num_threads(args.omp_cores)
    torch.manual_seed(args.seed)

    model = coupledLadders.COUPLEDLADDERS(alpha=args.alpha)

    # initialize an ipeps
    # 1) define a lattice-tiling function that maps an arbitrary vertex of the square
    # lattice onto one of the coordinates within the unit cell of the iPEPS ansatz
    if args.instate != None:
        state = read_ipeps(args.instate)
        if args.bond_dim > max(state.get_aux_bond_dims()):
            # extend the auxiliary dimensions
            state = extend_bond_dim(state, args.bond_dim)
        state.add_noise(args.instate_noise)
    elif args.opt_resume is not None:
        state = IPEPS(dict(), lX=2, lY=2)
        state.load_checkpoint(args.opt_resume)
    elif args.ipeps_init_type == 'RANDOM':
        bond_dim = args.bond_dim

        A = torch.rand((model.phys_dim, bond_dim, bond_dim, bond_dim, bond_dim),\
            dtype=cfg.global_args.torch_dtype,device=cfg.global_args.device)
        B = torch.rand((model.phys_dim, bond_dim, bond_dim, bond_dim, bond_dim),\
            dtype=cfg.global_args.torch_dtype,device=cfg.global_args.device)
        C = torch.rand((model.phys_dim, bond_dim, bond_dim, bond_dim, bond_dim),\
            dtype=cfg.global_args.torch_dtype,device=cfg.global_args.device)
        D = torch.rand((model.phys_dim, bond_dim, bond_dim, bond_dim, bond_dim),\
            dtype=cfg.global_args.torch_dtype,device=cfg.global_args.device)

        sites = {(0, 0): A, (1, 0): B, (0, 1): C, (1, 1): D}

        for k in sites.keys():
            sites[k] = sites[k] / torch.max(torch.abs(sites[k]))
        state = IPEPS(sites, lX=2, lY=2)
    else:
        raise ValueError("Missing trial state: --instate=None and --ipeps_init_type= "\
            +str(args.ipeps_init_type)+" is not supported")

    if not state.dtype == model.dtype:
        cfg.global_args.torch_dtype = state.dtype
        print(
            f"dtype of initial state {state.dtype} and model {model.dtype} do not match."
        )
        print(f"Setting default dtype to {cfg.global_args.torch_dtype} and reinitializing "\
        +" the model")
        model = coupledLadders.COUPLEDLADDERS(alpha=args.alpha)

    print(state)

    @torch.no_grad()
    def ctmrg_conv_energy(state, env, history, ctm_args=cfg.ctm_args):
        if not history:
            history = []
        e_curr = model.energy_2x1_1x2(state, env)
        e_curr = e_curr.real if e_curr.is_complex() else e_curr
        history.append(e_curr.item())

        if (len(history) > 1 and abs(history[-1]-history[-2]) < ctm_args.ctm_conv_tol)\
            or len(history) >= ctm_args.ctm_max_iter:
            log.info({"history_length": len(history), "history": history})
            return True, history
        return False, history

    ctm_env = ENV(args.chi, state)
    init_env(state, ctm_env)

    ctm_env, *ctm_log = ctmrg.run(state, ctm_env, conv_check=ctmrg_conv_energy)
    loss0 = model.energy_2x1_1x2(state, ctm_env)
    obs_values, obs_labels = model.eval_obs(state, ctm_env)
    print(", ".join(["epoch", "energy"] + obs_labels))
    print(", ".join([f"{-1}", f"{loss0}"] + [f"{v}" for v in obs_values]))

    def loss_fn(state, ctm_env_in, opt_context):
        ctm_args = opt_context["ctm_args"]
        opt_args = opt_context["opt_args"]

        # possibly re-initialize the environment
        if opt_args.opt_ctm_reinit:
            init_env(state, ctm_env_in)

        # 1) compute environment by CTMRG
        ctm_env_out, *ctm_log = ctmrg.run(state, ctm_env_in, \
            conv_check=ctmrg_conv_energy, ctm_args=ctm_args)

        # 2) evaluate loss with converged environment
        loss = model.energy_2x1_1x2(state, ctm_env_out)

        return (loss, ctm_env_out, *ctm_log)

    def _to_json(l):
        re = [l[i, 0].item() for i in range(l.size()[0])]
        im = [l[i, 1].item() for i in range(l.size()[0])]
        return dict({"re": re, "im": im})

    @torch.no_grad()
    def obs_fn(state, ctm_env, opt_context):
        if ("line_search" in opt_context.keys() and not opt_context["line_search"]) \
            or not "line_search" in opt_context.keys():
            epoch = len(opt_context["loss_history"]["loss"])
            loss = opt_context["loss_history"]["loss"][-1]
            obs_values, obs_labels = model.eval_obs(state, ctm_env)
            print(", ".join([f"{epoch}", f"{loss}"] +
                            [f"{v}" for v in obs_values]))

            with torch.no_grad():
                if args.top_freq > 0 and epoch % args.top_freq == 0:
                    coord_dir_pairs = [((0, 0), (1, 0)), ((0, 0), (0, 1)),
                                       ((1, 1), (1, 0)), ((1, 1), (0, 1))]
                    for c, d in coord_dir_pairs:
                        # transfer operator spectrum
                        print(f"TOP spectrum(T)[{c},{d}] ", end="")
                        l = transferops.get_Top_spec(args.top_n, c, d, state,
                                                     ctm_env)
                        print("TOP " + json.dumps(_to_json(l)))

    # optimize
    optimize_state(state, ctm_env, loss_fn, obs_fn=obs_fn)

    # compute final observables for the best variational state
    outputstatefile = args.out_prefix + "_state.json"
    state = read_ipeps(outputstatefile)
    ctm_env = ENV(args.chi, state)
    init_env(state, ctm_env)
    ctm_env, *ctm_log = ctmrg.run(state, ctm_env, conv_check=ctmrg_conv_energy)
    loss0 = model.energy_2x1_1x2(state, ctm_env)
    obs_values, obs_labels = model.eval_obs(state, ctm_env)
    print(", ".join([f"{args.opt_max_iter}", f"{loss0}"] +
                    [f"{v}" for v in obs_values]))
Ejemplo n.º 36
0
def main():
    # Get running configuration
    config, _ = get_config()
    print_config()

    # Build tensorflow graph from config
    print("Building graph...")
    actor = Actor(config)

    # Saver to save & restore all the variables.
    variables_to_save = [
        v for v in tf.global_variables()
        if (v.name.startswith("encoder") | v.name.startswith("decoder")
            | v.name.startswith("loop") | v.name.startswith("beta1_power:0")
            | v.name.startswith("beta2_power:0")) and 'Adam' not in v.name
    ]
    saver = tf.train.Saver(var_list=variables_to_save,
                           keep_checkpoint_every_n_hours=1.0)

    print("Starting session...")
    with tf.Session() as sess:
        # Run initialize op
        sess.run(tf.global_variables_initializer())

        # Restore variables from disk.
        if config.restore_model == True:
            saver.restore(sess, "./save/" + config.sp_save + "/tmp.ckpt-2800")

            print("Model restored.")

        # Initialize data generator
        training_set = DataLoader(config.batch_size, config.max_length,
                                  config.input_dimension)
        # Training mode
        if not config.inference_mode:

            # Summary writer
            writer = tf.summary.FileWriter(config.log_dir, sess.graph)

            print("Starting training...")
            for i in tqdm(range(config.nb_epoch)):
                # Get feed dict
                input_batch = training_set.gen_train_dataset()
                feed = {actor.input_: input_batch}

                # Forward pass & train step
                summary, base_op, train_step1, train_step2 = sess.run(
                    [
                        actor.merged, actor.base_op, actor.train_step1,
                        actor.train_step2
                    ],
                    feed_dict=feed)

                if i % 10 == 0:
                    writer.add_summary(summary, i)
                # Save the variables to disk
                if i % max(1, int(config.nb_epoch / 5)) == 0 and i != 0:
                    save_path = saver.save(sess,
                                           "save/" + config.save_to +
                                           "/tmp.ckpt",
                                           global_step=i)
                    print("\n Model saved in file: %s" % save_path)

            print("Training COMPLETED !")
            saver.save(sess, "save/" + config.save_to + "/actor.ckpt")

        # Inference mode
        else:

            targets = []
            predictions = []

            for __ in tqdm(range(1000)):  # num of examples

                # Get feed_dict (single input)
                seed_ = 1 + __
                input_batch = training_set.gen_test_dataset()
                feed = {actor.input_: input_batch}
                #print("input_batch:", input_batch)
                # Solve instance (OR tools)
                #opt_trip, opt_length = training_set.solve_instance(or_sequence)
                targets.append(1)
                #print('\n Optimal length:',opt_length/100)

                ################################### UMPA LOOOOP HERE ###################################    nb_loop / temperature

                # Sample solutions
                permutations, circuit_length = sess.run(
                    [actor.positions, actor.distances], feed_dict=feed)
                #training_set.visualize_sampling(permutations)

                # Find best solution
                j = np.argmin(circuit_length)
                best_permutation = permutations[j][:-1]
                predictions.append(circuit_length[j])
                print(best_permutation, circuit_length[j])

            predictions = np.asarray(predictions)
            print(' Mean length:', np.mean(predictions))
            ratio = np.asarray(predictions) / np.asarray(targets)
            print('\n Average deviation: \n', np.mean(ratio))