Code example #1
0
def memory_test_basic(params, mem_system, row, col):
    """
    Run a memory test for a given number of trials and average the results;
    a simplified version of memory_test.

    Parameters
    --------------------------
    params : class instance
        Contains simulation parameters (trials, noise, list_length, data_dir).

    mem_system : class instance
        A single memory system to test (unlike memory_test, which takes a
        list of systems).

    row, col : int
        The row and column of the dataframe to store data to. Typically,
        row represents offset and col the noise level.
    """
    import memory.test_types as test

    # One [retrieved, performance] record per trial.
    data = [None] * params.trials
    for t in range(params.trials):
        mem_system.item_retrieval(col)
        test.roc_test(mem_system)
        data[t] = [copy.deepcopy(mem_system.retrieved),
                   copy.deepcopy(mem_system.performance)]

    save_average(mem_system.dataFrame, data, row, col)  # average the data over all trials
    # Persist the dataframe once the final noise level has been processed.
    if col == params.noise[-1]:
        save_data(mem_system.dataFrame,
                  str(params.list_length) + '-' + str(mem_system.scale),
                  'pkl', folder=params.data_dir)
Code example #2
0
def memory_test(params, mem_systems, row, col):
    """
    Run a memory test for a given number of trials and average the results.

    Parameters
    --------------------------
    params : class instance
        Contains simulation parameters.

    mem_systems : list of class instances
        Contains memory systems with different pattern separation values.

    row, col : int
        The row and column of the dataframe to store data to. Typically,
        row represents offset and col the noise level.

    Returns
    -------
    data : list
        data[m][t] holds the [retrieved, performance] record of memory
        system m on trial t.
    """
    import memory.test_types as test
    import Input.patterns as inp

    # One row per memory system (params.hip + 1), one column per trial.
    data = np.zeros((params.hip + 1, params.trials)).tolist()

    for t in range(params.trials):
        # Load the stimuli, re-seeded per trial so each trial gets its own
        # probe set while remaining reproducible.
        input_patterns = inp.probes(params)
        input_patterns.probe_faces(seed=t)
        input_patterns.probe_assignment()
        params.d_pattern = np.shape(input_patterns.test)[-1]  # ignore

        # Perform scaling of study and test patterns for every system.
        for mem_sys in mem_systems:
            mem_sys.perform_scaling(input_patterns.study_all, 'study')
            mem_sys.perform_scaling(input_patterns.test, 'test')

        for m, mem in enumerate(mem_systems):
            mem.target_memory = []
            mem.item_retrieval(col)
            test.roc_test(mem)
            data[m][t] = [copy.deepcopy(mem.retrieved),
                          copy.deepcopy(mem.performance)]

    for m, mem in enumerate(mem_systems):
        save_average(mem.dataFrame, data[m], row, col)  # average the data over all trials
        # Persist once the final offset/noise combination has been processed.
        if row == params.offset[-1] and col == params.noise[-1]:
            save_data(mem.dataFrame,
                      str(params.list_length) + '-' + str(mem.scale),
                      'pkl', folder=params.data_dir)

    return data
Code example #3
0
def run_routine(params):
    """
    Runs the routine simulation.

    Parameters
    ---------
    params : class instance
        simulation parameters

    Returns
    -------
    mems : list
        The memory systems used in the last offset iteration.
    data : list
        The result of the last memory_test call.
    """

    # Initialize one memory system per pattern-separation scale.
    mem_systems = [memory.memory_system(params, scale) for scale in params.scale]

    for offset in params.offset:
        # Shallow copy of the system list for this offset iteration.
        mems = list(mem_systems)

        # perform the memory test for different levels of memory noise
        print('Stimuli loaded, memory systems initialized')
        print('Performing the memory tests for %s trials...' % params.trials)
        for nn in params.noise:
            data = memory_test(params, mems, offset, nn)

    if params.strength in ['MS', 'MW']:
        params.N_test = int(params.N_test / 2)

    # save the information if required
    if params.matlab:  # to fit the data by the Yonelinas model
        mat = matlab(params)
        mat.prepare_data()

    if params.save_metadata:
        folder = params.path + '/Log'
        save_data(params,
                  'metadata_' + params.simID,
                  file_format='pkl',
                  folder=folder)
    return mems, data
Code example #4
0
# This is needed so that data is stored in the correct file; more relevant
# for the main code.
params.cond = str(params.N_test)
params.data_dir = params.data_dir + params.cond + '/'

# Initialize the class of inputs and load the face stimuli.
input_patterns = inp.probes(params)
input_patterns.probe_faces()
# Assign the stimuli to targets and lures.
input_patterns.probe_assignment([-1])

# Set up the memory system with scale 1.
mem_system = mem.memory_system(params, 1)
print('Memory system initialized')

mem_system.perform_scaling(input_patterns.study_all, 'study')
mem_system.perform_scaling(input_patterns.test, 'test')

# Perform the memory test for different noise levels.
print('Performing recognition memory task...')
for nn in params.noise:
    memory_test_basic(params, mem_system, params.offset[0], nn)

if params.save_metadata:
    save_data(params,
              'metadata_' + params.simID,
              file_format='pkl',
              folder=params.path + '/Log')
print('Done!')
Code example #5
0
File: static_infer.py  Project: duyiqi17/PaddleRec
def main(args):
    """Run static-graph inference over the configured epoch range.

    Loads the YAML config (with optional command-line ``key=value``
    overrides from ``args.opt``), builds the inference network of the
    configured static model, then for each epoch in
    [infer_start_epoch, infer_end_epoch) restores the saved parameters
    and feeds the test data through the executor, logging the fetched
    metric values.

    Parameters
    ----------
    args : argparse.Namespace
        Must provide ``config_yaml`` and ``abs_dir``; ``opt`` is an
        optional list of "key=value" override strings.
    """
    # Fixed seed for reproducible runs.
    paddle.seed(12345)

    # load config
    config = load_yaml(args.config_yaml)
    config["config_abs_dir"] = args.abs_dir
    # modify config from command
    if args.opt:
        for parameter in args.opt:
            parameter = parameter.strip()
            key, value = parameter.split("=")
            # Coerce the override string to the type of the value already
            # present in the config (int/float/bool); otherwise keep str.
            if type(config.get(key)) is int:
                value = int(value)
            if type(config.get(key)) is float:
                value = float(value)
            if type(config.get(key)) is bool:
                value = (True if value.lower() == "true" else False)
            config[key] = value
    # load static model class
    static_model_class = load_static_model_class(config)

    # Build the feed variables and the inference network.
    input_data = static_model_class.create_feeds(is_infer=True)
    input_data_names = [data.name for data in input_data]

    fetch_vars = static_model_class.infer_net(input_data)
    logger.info("cpu_num: {}".format(os.getenv("CPU_NUM")))

    # Runner options, all read from the "runner." config namespace.
    use_gpu = config.get("runner.use_gpu", True)
    use_xpu = config.get("runner.use_xpu", False)
    use_auc = config.get("runner.use_auc", False)
    use_visual = config.get("runner.use_visual", False)
    auc_num = config.get("runner.auc_num", 1)
    test_data_dir = config.get("runner.test_data_dir", None)
    print_interval = config.get("runner.print_interval", None)
    model_load_path = config.get("runner.infer_load_path", "model_output")
    start_epoch = config.get("runner.infer_start_epoch", 0)
    end_epoch = config.get("runner.infer_end_epoch", 10)
    batch_size = config.get("runner.infer_batch_size", None)
    use_save_data = config.get("runner.use_save_data", False)
    reader_type = config.get("runner.reader_type", "DataLoader")
    use_fleet = config.get("runner.use_fleet", False)
    os.environ["CPU_NUM"] = str(config.get("runner.thread_num", 1))
    logger.info("**************common.configs**********")
    logger.info(
        "use_gpu: {}, use_xpu: {}, use_visual: {}, infer_batch_size: {}, test_data_dir: {}, start_epoch: {}, end_epoch: {}, print_interval: {}, model_load_path: {}"
        .format(use_gpu, use_xpu, use_visual, batch_size, test_data_dir,
                start_epoch, end_epoch, print_interval, model_load_path))
    logger.info("**************common.configs**********")

    # Select the execution device; XPU takes precedence over GPU.
    if use_xpu:
        xpu_device = 'xpu:{0}'.format(os.getenv('FLAGS_selected_xpus', 0))
        place = paddle.set_device(xpu_device)
    else:
        place = paddle.set_device('gpu' if use_gpu else 'cpu')
    exe = paddle.static.Executor(place)
    # initialize
    exe.run(paddle.static.default_startup_program())

    # NOTE(review): test_dataloader stays undefined for any other
    # reader_type value, which would raise NameError below — confirm the
    # config only ever uses these two reader types.
    if reader_type == 'DataLoader':
        test_dataloader = create_data_loader(config=config,
                                             place=place,
                                             mode="test")
    elif reader_type == "CustomizeDataLoader":
        test_dataloader = static_model_class.create_data_loader()

    # Create a log_visual object and store the data in the path
    if use_visual:
        from visualdl import LogWriter
        log_visual = LogWriter(args.abs_dir + "/visualDL_log/infer")
    step_num = 0

    for epoch_id in range(start_epoch, end_epoch):
        logger.info("load model epoch {}".format(epoch_id))
        # Each epoch's parameters live under <model_load_path>/<epoch_id>.
        model_path = os.path.join(model_load_path, str(epoch_id))
        load_static_model(paddle.static.default_main_program(),
                          model_path,
                          prefix='rec_static')

        epoch_begin = time.time()
        interval_begin = time.time()
        infer_reader_cost = 0.0
        infer_run_cost = 0.0
        reader_start = time.time()

        if use_auc:
            # Clear accumulated AUC state so epochs don't leak into each other.
            reset_auc(use_fleet, auc_num)

        #we will drop the last incomplete batch when dataset size is not divisible by the batch size
        assert any(
            test_dataloader()
        ), "test_dataloader's size is null, please ensure batch size < dataset size!"

        for batch_id, batch_data in enumerate(test_dataloader()):
            infer_reader_cost += time.time() - reader_start
            infer_start = time.time()
            fetch_batch_var = exe.run(
                program=paddle.static.default_main_program(),
                feed=dict(zip(input_data_names, batch_data)),
                fetch_list=[var for _, var in fetch_vars.items()])
            infer_run_cost += time.time() - infer_start
            if batch_id % print_interval == 0:
                # Assemble "name: value, " pairs for every fetched variable.
                metric_str = ""
                for var_idx, var_name in enumerate(fetch_vars):
                    metric_str += "{}: {}, ".format(
                        var_name, fetch_batch_var[var_idx][0])
                    if use_visual:
                        log_visual.add_scalar(
                            tag="infer/" + var_name,
                            step=step_num,
                            value=fetch_batch_var[var_idx][0])
                logger.info(
                    "epoch: {}, batch_id: {}, ".format(epoch_id, batch_id) +
                    metric_str +
                    "avg_reader_cost: {:.5f} sec, avg_batch_cost: {:.5f} sec, avg_samples: {:.5f}, ips: {:.2f} ins/s"
                    .format(
                        infer_reader_cost /
                        print_interval, (infer_reader_cost + infer_run_cost) /
                        print_interval, batch_size, print_interval *
                        batch_size / (time.time() - interval_begin)))
                interval_begin = time.time()
                infer_reader_cost = 0.0
                infer_run_cost = 0.0
            reader_start = time.time()
            step_num = step_num + 1

        # Epoch summary: logs the metrics from the final batch.
        metric_str = ""
        for var_idx, var_name in enumerate(fetch_vars):
            metric_str += "{}: {}, ".format(var_name,
                                            fetch_batch_var[var_idx][0])
        logger.info("epoch: {} done, ".format(epoch_id) + metric_str +
                    "epoch time: {:.2f} s".format(time.time() - epoch_begin))
        if use_save_data:
            save_data(fetch_batch_var, model_load_path)
Code example #6
0
def main(args):
    """Train the configured static-graph model.

    Loads the YAML config (with optional command-line ``key=value``
    overrides from ``args.opt``), builds the training network, then for
    each epoch runs the chosen reader (DataLoader or QueueDataset),
    saves the model parameters, and optionally exports an inference
    model.

    Parameters
    ----------
    args : argparse.Namespace
        Must provide ``config_yaml`` and ``abs_dir``; ``opt`` is an
        optional list of "key=value" override strings.
    """
    # Fixed seed for reproducible runs.
    paddle.seed(12345)

    # load config
    config = load_yaml(args.config_yaml)
    config["yaml_path"] = args.config_yaml
    config["config_abs_dir"] = args.abs_dir
    # modify config from command
    if args.opt:
        for parameter in args.opt:
            parameter = parameter.strip()
            key, value = parameter.split("=")
            # Coerce the override string to the type of the value already
            # present in the config (int/float/bool); otherwise keep str.
            if type(config.get(key)) is int:
                value = int(value)
            if type(config.get(key)) is float:
                value = float(value)
            if type(config.get(key)) is bool:
                value = (True if value.lower() == "true" else False)
            config[key] = value
    # load static model class
    static_model_class = load_static_model_class(config)
    # Build the feed variables and the training network.
    input_data = static_model_class.create_feeds()
    input_data_names = [data.name for data in input_data]

    fetch_vars = static_model_class.net(input_data)

    #infer_target_var = model.infer_target_var
    logger.info("cpu_num: {}".format(os.getenv("CPU_NUM")))

    # Runner options, all read from the "runner." config namespace.
    use_gpu = config.get("runner.use_gpu", True)
    use_xpu = config.get("runner.use_xpu", False)
    use_auc = config.get("runner.use_auc", False)
    use_visual = config.get("runner.use_visual", False)
    use_inference = config.get("runner.use_inference", False)
    auc_num = config.get("runner.auc_num", 1)
    train_data_dir = config.get("runner.train_data_dir", None)
    epochs = config.get("runner.epochs", None)
    print_interval = config.get("runner.print_interval", None)
    model_save_path = config.get("runner.model_save_path", "model_output")
    model_init_path = config.get("runner.model_init_path", None)
    batch_size = config.get("runner.train_batch_size", None)
    reader_type = config.get("runner.reader_type", "DataLoader")
    use_fleet = config.get("runner.use_fleet", False)
    use_save_data = config.get("runner.use_save_data", False)
    os.environ["CPU_NUM"] = str(config.get("runner.thread_num", 1))
    logger.info("**************common.configs**********")
    logger.info(
        "use_gpu: {}, use_xpu: {}, use_visual: {}, train_batch_size: {}, train_data_dir: {}, epochs: {}, print_interval: {}, model_save_path: {}"
        .format(use_gpu, use_xpu, use_visual, batch_size, train_data_dir,
                epochs, print_interval, model_save_path))
    logger.info("**************common.configs**********")

    # Select the execution device; XPU takes precedence over GPU.
    if use_xpu:
        xpu_device = 'xpu:{0}'.format(os.getenv('FLAGS_selected_xpus', 0))
        place = paddle.set_device(xpu_device)
    else:
        place = paddle.set_device('gpu' if use_gpu else 'cpu')

    # Collective (multi-device) training setup; the optimizer needs the
    # distributed strategy when fleet is enabled.
    if use_fleet:
        from paddle.distributed import fleet
        strategy = fleet.DistributedStrategy()
        fleet.init(is_collective=True, strategy=strategy)
    if use_fleet:
        static_model_class.create_optimizer(strategy)
    else:
        static_model_class.create_optimizer()

    exe = paddle.static.Executor(place)
    # initialize
    exe.run(paddle.static.default_startup_program())

    # Warm-start from a previous checkpoint if one was configured.
    if model_init_path is not None:
        load_static_parameter(paddle.static.default_main_program(),
                              model_init_path,
                              prefix='rec_static')

    # Resume after this epoch id; -1 means start from scratch.
    last_epoch_id = config.get("last_epoch", -1)

    # Create a log_visual object and store the data in the path
    if use_visual:
        from visualdl import LogWriter
        log_visual = LogWriter(args.abs_dir + "/visualDL_log/train")
    else:
        log_visual = None
    step_num = 0

    # CustomizeDataLoader is treated as a DataLoader from here on.
    if reader_type == 'QueueDataset':
        dataset, file_list = get_reader(input_data, config)
    elif reader_type == 'DataLoader':
        train_dataloader = create_data_loader(config=config, place=place)
    elif reader_type == "CustomizeDataLoader":
        train_dataloader = static_model_class.create_data_loader()
        reader_type = 'DataLoader'

    for epoch_id in range(last_epoch_id + 1, epochs):

        epoch_begin = time.time()
        if use_auc:
            # Clear accumulated AUC state so epochs don't leak into each other.
            reset_auc(use_fleet, auc_num)
        if reader_type == 'DataLoader':
            fetch_batch_var, step_num = dataloader_train(
                epoch_id, train_dataloader, input_data_names, fetch_vars, exe,
                config, use_visual, log_visual, step_num)
            # Epoch summary built from the last fetched batch variables.
            metric_str = ""
            for var_idx, var_name in enumerate(fetch_vars):
                metric_str += "{}: {}, ".format(
                    var_name,
                    str(fetch_batch_var[var_idx]).strip("[]"))
            logger.info("epoch: {} done, ".format(epoch_id) + metric_str +
                        "epoch time: {:.2f} s".format(time.time() -
                                                      epoch_begin))
        elif reader_type == 'QueueDataset':
            fetch_batch_var = dataset_train(epoch_id, dataset, fetch_vars, exe,
                                            config)
            logger.info("epoch: {} done, ".format(epoch_id) +
                        "epoch time: {:.2f} s".format(time.time() -
                                                      epoch_begin))
        else:
            logger.info("reader type wrong")

        # Under fleet only rank 0 saves; otherwise every process saves.
        if use_fleet:
            trainer_id = paddle.distributed.get_rank()
            if trainer_id == 0:
                save_static_model(paddle.static.default_main_program(),
                                  model_save_path,
                                  epoch_id,
                                  prefix='rec_static')
        else:
            save_static_model(paddle.static.default_main_program(),
                              model_save_path,
                              epoch_id,
                              prefix='rec_static')
        if use_save_data:
            save_data(fetch_batch_var, model_save_path)

        # Optionally export an inference model using the configured feed
        # and fetch variable names; each must exist in the main program.
        if use_inference:
            feed_var_names = config.get("runner.save_inference_feed_varnames",
                                        [])
            feedvars = []
            fetch_var_names = config.get(
                "runner.save_inference_fetch_varnames", [])
            fetchvars = []
            for var_name in feed_var_names:
                if var_name not in paddle.static.default_main_program(
                ).global_block().vars:
                    raise ValueError(
                        "Feed variable: {} not in default_main_program, global block has follow vars: {}"
                        .format(
                            var_name,
                            paddle.static.default_main_program().global_block(
                            ).vars.keys()))
                else:
                    feedvars.append(paddle.static.default_main_program().
                                    global_block().vars[var_name])
            for var_name in fetch_var_names:
                if var_name not in paddle.static.default_main_program(
                ).global_block().vars:
                    raise ValueError(
                        "Fetch variable: {} not in default_main_program, global block has follow vars: {}"
                        .format(
                            var_name,
                            paddle.static.default_main_program().global_block(
                            ).vars.keys()))
                else:
                    fetchvars.append(paddle.static.default_main_program().
                                     global_block().vars[var_name])

            save_inference_model(model_save_path, epoch_id, feedvars,
                                 fetchvars, exe)