def main():
    parser = get_parser()
    args = parser.parse_args()
    if args.joint and args.method not in BILINEAR_MODULES:
        raise ValueError(
            'Joint models are only supported for bilinear models: %s' %
            list(BILINEAR_MODULES.keys()))

    # Construct or load the experiment
    if args.load_experiment_from:
        print("Loading experiment from {}".format(args.load_experiment_from))
        with open(args.load_experiment_from, "rb") as f:
            experiment = pickle.load(f)
    elif args.dataset_path:
        experiment = Experiment()
        print("Constructing experiment...")
        experiment.construct_experiment(args.dataset_path, args.joint,
                                        args.typed_negatives)
        print("Experiment constructed!")
        if args.save_experiment_to:
            print("Saving experiment to {}".format(args.save_experiment_to))
            with open(args.save_experiment_to, "wb") as f:
                pickle.dump(experiment, f)
    else:
        raise ValueError(
            "Either dataset_path or load_experiment_from must be given")

    parameters = get_parameters(args)
    _ = train(experiment, parameters, args.method, args.joint,
              args.run_on_test)

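# A minimal sketch of the get_parser() used above, inferred from the flags
# that main() reads; the defaults and help strings are assumptions, not the
# original definitions.
import argparse

def get_parser():
    parser = argparse.ArgumentParser(description="Train (joint) embedding models.")
    parser.add_argument('--dataset_path', help='Path to the dataset directory.')
    parser.add_argument('--load_experiment_from', help='Pickle file with a prebuilt experiment.')
    parser.add_argument('--save_experiment_to', help='Where to pickle the constructed experiment.')
    parser.add_argument('--method', default='distmult', help='Embedding method, e.g. distmult.')
    parser.add_argument('--joint', action='store_true', help='Train the joint (typed) model.')
    parser.add_argument('--typed_negatives', action='store_true', help='Sample type-consistent negatives.')
    parser.add_argument('--run_on_test', action='store_true', help='Evaluate on the test split.')
    return parser
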
def run_pac_rdp_simple(
    experiment_id: str,
    experiment_dir: Path,
    env: gym.Env,
    epsilon: float = 0.05,
    delta: float = 0.05,
    gamma: float = 0.99,
    max_depth: int = 10,
    update_frequency: int = 500,
    checkpoint_frequency: int = 500,
    **experiment_configs
):
    """Run PAC-RDP experiments (v2)."""
    agent_params = dict(
        env=env,
        epsilon=epsilon,
        delta=delta,
        gamma=gamma,
        max_depth=max_depth,
        update_frequency=update_frequency,
    )
    callbacks = [RDPCheckpoint(checkpoint_frequency, experiment_dir / experiment_id)]
    experiment = Experiment(
        experiment_id,
        env,
        PacRdpAgentSimple,
        agent_params,
        callbacks=callbacks,
        **experiment_configs
    )
    stats = experiment.run()
    return stats

def run_q_learning(
    experiment_id: str,
    experiment_dir: Path,
    env: gym.Env,
    epsilon: float = 0.1,
    alpha: float = 0.1,
    gamma: float = 0.99,
    checkpoint_frequency: int = 500,
    **experiment_configs
):
    """Run Q-Learning experiments."""
    agent_params: Dict = dict(
        policy=make_eps_greedy_policy(epsilon),
        alpha=alpha,
        gamma=gamma
    )
    callbacks = [Checkpoint(checkpoint_frequency, experiment_dir / experiment_id)]
    experiment = Experiment(
        experiment_id,
        env,
        QLearning,
        agent_params,
        callbacks=callbacks,
        **experiment_configs
    )
    stats = experiment.run()
    return stats

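# A minimal usage sketch for the two runners above; the environment id and
# output directory are assumptions, not taken from the original experiments.
from pathlib import Path
import gym

env = gym.make("Taxi-v3")
stats = run_q_learning(
    "q-learning-taxi",   # experiment_id
    Path("outputs"),     # experiment_dir, used for checkpoints
    env,
    epsilon=0.1,
    alpha=0.5,
    gamma=0.99,
)
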
def main():
    # Experiment reads all arguments from the .ini file and the command line interface (CLI).
    experiment = Experiment()

    if experiment.is_master:
        # Initialize the Bayesian optimizer
        optimizer = BayesianOptimizer(
            **experiment.experiment_arguments.get_arguments())
        try:
            # Start the optimizer in a background thread
            optimizer.run_in_thread()
            worker = Worker(train_manager=experiment.train_manager,
                            budget_decoder=experiment.budget_decoder,
                            experiment_args=experiment.experiment_arguments,
                            **experiment.experiment_arguments.get_arguments())
            worker.run()
        finally:
            logger.info('Cleaning up optimizer object')
            optimizer.clean_pyro_file()
    else:
        worker = Worker(train_manager=experiment.train_manager,
                        budget_decoder=experiment.budget_decoder,
                        experiment_args=experiment.experiment_arguments,
                        **experiment.experiment_arguments.get_arguments())
        worker.run()

def check_nemo_files(self, mode="absence", summary=False):
    logging.info('Started')
    files = self.get_all_netcdf_files()
    print("number of files: %d" % len(files))
    self.experiment = Experiment(date_from=self._date_from,
                                 date_to=self._date_to,
                                 resulted_files=[files],
                                 file_format=self.file_format)
    if mode == "absence":
        errors = self.experiment.check_for_absence()
        if summary:
            self.summary(errors, [])
        logging.info('Finished')
        return errors
    else:
        absence_errors = self.experiment.check_for_absence()
        vars_errors = self.experiment.check_oceanic_variables()
        if summary:
            self.summary(absence_errors, vars_errors)
        logging.info('Finished')
        return absence_errors + vars_errors

def test_check_for_absence_missing_file(self):
    results = [[
        'ARCTIC_1h_T_grid_T_20130101-20130101.nc',
        'ARCTIC_1h_UV_grid_UV_20130101-20130101.nc'
    ]]
    file_format = FileFormat(format_file="../formats/nemo14-formats.yaml")
    experiment = Experiment(date_from=date(2013, 1, 1),
                            date_to=date(2013, 1, 1),
                            resulted_files=results,
                            file_format=file_format)
    errors = experiment.check_for_absence()
    error = ("Simulation results for day: 20130101 have some missing files "
             "or its names are incorrect")
    self.assertEqual(len(errors), 1)
    self.assertIn(error, errors[0])

def test_accel(self):
    """Tests the accel method."""
    experiment = Experiment(TasksMock())
    experiment.accel(80, 48, 97, time.time())
    experiment.accel(3, 42, 79, time.time())
    # assertRegexpMatches is the Python 2 name; on Python 3 use assertRegex.
    self.assertRegexpMatches(experiment.get_output(),
                             '^[0-9]* 80 48 97\n[0-9]* 3 42 79\n$')

def test_check_for_absence_missing_day(self):
    results = [[
        'ARCTIC_1h_ice_grid_TUV_20130102-20130102.nc',
        'ARCTIC_1h_T_grid_T_20130102-20130102.nc',
        'ARCTIC_1h_UV_grid_UV_20130102-20130102.nc'
    ], [
        'ARCTIC_1h_ice_grid_TUV_20130103-20130103.nc',
        'ARCTIC_1h_T_grid_T_20130103-20130103.nc',
        'ARCTIC_1h_UV_grid_UV_20130103-20130103.nc'
    ]]
    file_format = FileFormat(format_file="../formats/nemo14-formats.yaml")
    experiment = Experiment(date_from=date(2013, 1, 1),
                            date_to=date(2013, 1, 3),
                            resulted_files=results,
                            file_format=file_format)
    errors = experiment.check_for_absence()
    error = "Simulation results were not found for day: 20130101"
    self.assertEqual(len(errors), 1)
    self.assertIn(error, errors[0])

def __init__(self):
    # Python 2-era dependencies: Queue, select.poll and the xwiimote binding.
    try:
        self.xwiimote = __import__("xwiimote")
    except ImportError:
        print("No xwiimote found")
        exit(1)
    else:
        self.fd_value = None
        self.dev = None
        self.ini_xwii()
        self.queue = Queue.Queue()
        self.poll = poll()
        self.poll.register(self.fd_value, POLLIN)
        self.loop_active = True
        gui = TasksGui()
        experiment = Experiment(gui)
        threading.Thread.__init__(self)
        self.start()
        output_file = None
        # Event loop: evt[0] encodes the event type (1: B down, 2: B up,
        # 3: accelerometer sample, 4: quit request).
        while self.loop_active:
            evt = self.queue.get()
            if evt[0] == 1:
                experiment.press_b_down(evt[1])
            elif evt[0] == 2:
                experiment.press_b_up(evt[1])
                if experiment.is_finished():
                    output_file = open(
                        "data/record-" + str(time.time()) + ".txt", "w")
                    output_file.write(experiment.get_output())
                    output_file.close()
                    self.loop_active = False
            elif evt[0] == 3:
                experiment.accel(evt[1], evt[2], evt[3], evt[4])
            elif evt[0] == 4:
                self.loop_active = False
        gui.quit()

def test_check_for_absence_correct(self):
    results = [[
        'ARCTIC_1h_ice_grid_TUV_20130102-20130102.nc',
        'ARCTIC_1h_T_grid_T_20130102-20130102.nc',
        'ARCTIC_1h_UV_grid_UV_20130102-20130102.nc'
    ], [
        'ARCTIC_1h_ice_grid_TUV_20130103-20130103.nc',
        'ARCTIC_1h_T_grid_T_20130103-20130103.nc',
        'ARCTIC_1h_UV_grid_UV_20130103-20130103.nc'
    ], [
        'ARCTIC_1h_ice_grid_TUV_20130101-20130101.nc',
        'ARCTIC_1h_T_grid_T_20130101-20130101.nc',
        'ARCTIC_1h_UV_grid_UV_20130101-20130101.nc'
    ]]
    file_format = FileFormat(format_file="../formats/nemo14-formats.yaml")
    experiment = Experiment(date_from=date(2013, 1, 1),
                            date_to=date(2013, 1, 3),
                            resulted_files=results,
                            file_format=file_format)
    self.assertEqual(len(experiment.check_for_absence()), 0)

def main():
    # Experiment reads all arguments from the .ini file and the command line interface (CLI).
    experiment = Experiment()
    train_manager = experiment.train_manager
    experiment_arguments = experiment.experiment_arguments

    # Get the important arguments
    validation_data_type = experiment_arguments.validation_data_type
    run_log_folder = experiment_arguments.run_log_folder
    if run_log_folder == "":
        raise RuntimeError(
            "Did you forget to specify run_log_folder? No idea which model to validate.")

    # We don't force the user to use specific settings during evaluation, but
    # people make mistakes, so display warnings when unexpected behavior is spotted.
    if validation_data_type in [
            SequenceDataReader.Train_Data, SequenceDataReader.Test_Data
    ]:
        logger.warning('Evaluation on %s data set (Are you sure?)' %
                       validation_data_type)
    if experiment_arguments.random_mode != 0:
        logger.warning(
            'Running evaluation, but random mode is not 0 (Are you sure?)')
    if experiment_arguments.continuous != 0:
        logger.warning(
            'Running evaluation, but continuous is not 0 (Are you sure?)')
    if experiment_arguments.forget_state == 1:
        logger.warning(
            'Running evaluation, but forget_state is set to 1 (Are you sure?)')
    if experiment_arguments.balanced == 1:
        logger.warning(
            'Running evaluation, but balanced is set to 1 (Are you sure?)')

    # Do not save metrics in the train folder; use another place to store results.
    metrics = train_manager.validate(experiment_arguments, save_metrics=False)
    name = 'n%s_k%s' % (experiment_arguments.cv_n, experiment_arguments.cv_k)
    metrics.save(directory=os.path.join(
        '/home/chrabasp/EEG_Result_Val/eval_val_top1_anomaly', name))

    logger.info('%s Metrics:' % validation_data_type)
    logger.info(
        json.dumps(metrics.get_summarized_results(), indent=2, sort_keys=True))

def check_storage_with_ftp(self, storage, mode='absence', summary=False):
    logging.info('Started')
    files = self.combined_file_names(storage)
    print("number of files: %d" % len(files))
    self.experiment = Experiment(date_from=self._date_from,
                                 date_to=self._date_to,
                                 resulted_files=[files],
                                 file_format=self.file_format)
    if mode == 'absence':
        errors = self.experiment.check_for_absence()
        if summary:
            self.summary(errors, [])
        logging.info('Finished')
        return errors

def main(args):
    pl.seed_everything(46)
    trainer = pl.Trainer(max_epochs=args.epochs)
    exp = Experiment(
        dataset_name=args.dataset_name,
        batch_size=args.batch_size,
        learning_rate=args.learning_rate,
    )
    exp.fit(trainer)
    exp.save(trainer, args.save_path)

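# A minimal argument parser for the Lightning entry point above; the flag
# names follow the attributes main(args) reads, while the defaults are
# assumptions.
import argparse

def parse_args():
    parser = argparse.ArgumentParser()
    parser.add_argument('--dataset_name', type=str, required=True)
    parser.add_argument('--batch_size', type=int, default=32)
    parser.add_argument('--learning_rate', type=float, default=1e-3)
    parser.add_argument('--epochs', type=int, default=10)
    parser.add_argument('--save_path', type=str, default='model.ckpt')
    return parser.parse_args()

if __name__ == '__main__':
    main(parse_args())
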
def main():
    # Experiment reads all arguments from the .ini file and the command line interface (CLI).
    experiment = Experiment()
    train_manager = experiment.train_manager
    experiment_arguments = experiment.experiment_arguments

    # Get the important arguments
    validation_data_type = experiment_arguments.validation_data_type
    run_log_folder = experiment_arguments.run_log_folder
    if run_log_folder == "":
        raise RuntimeError(
            "Did you forget to specify run_log_folder? No idea which model to validate.")

    # We don't force the user to use specific settings during evaluation, but
    # people make mistakes, so display warnings when unexpected behavior is spotted.
    if validation_data_type in [
            BaseDataReader.Train_Data, BaseDataReader.Test_Data
    ]:
        logger.warning('Evaluation on %s data set (Are you sure?)' %
                       validation_data_type)
    if experiment_arguments.random_mode != 0:
        logger.warning(
            'Running evaluation, but random mode is not 0 (Are you sure?)')
    if experiment_arguments.continuous != 0:
        logger.warning(
            'Running evaluation, but continuous is not 0 (Are you sure?)')
    if experiment_arguments.forget_state == 1:
        logger.warning(
            'Running evaluation, but forget_state is set to 1 (Are you sure?)')
    if experiment_arguments.balanced == 1:
        logger.warning(
            'Running evaluation, but balanced is set to 1 (Are you sure?)')

    # Do not save metrics in the train folder!
    metrics = train_manager.validate(experiment_arguments)
    logger.info('%s Metrics:' % validation_data_type)
    logger.info(
        json.dumps(metrics.get_summarized_results(), indent=2, sort_keys=True))

def main():
    # Experiment reads all arguments from the .ini file and the command line interface (CLI).
    experiment = Experiment()
    train_manager = experiment.train_manager
    experiment_arguments = experiment.experiment_arguments

    # Sample a random configuration
    config_space = ExperimentArguments.read_configuration_space(
        experiment_arguments.config_space_file)
    random_config = config_space.sample_configuration()

    # Update the experiment arguments using the random configuration
    experiment_arguments = experiment_arguments.updated_with_configuration(
        random_config)

    # Run for different budgets
    full_budget_decoder = FullBudgetDecoder()
    adjusted_arguments_list = full_budget_decoder.adjusted_arguments(
        experiment_arguments, budget=None)

    for experiment_args in adjusted_arguments_list:
        # Initialize a new directory for each training run
        experiment_args.run_log_folder = train_manager.get_unique_dir()
        train_metrics = train_manager.train(experiment_args)
        valid_metrics = train_manager.validate(
            experiment_args,
            data_type=experiment_arguments.validation_data_type)

        # Print for the user
        logger.info('Train Metrics:')
        logger.info(
            json.dumps(train_metrics.get_summarized_results(), indent=2,
                       sort_keys=True))
        logger.info('%s Metrics:' %
                    experiment_arguments.validation_data_type.title())
        logger.info(
            json.dumps(valid_metrics.get_summarized_results(), indent=2,
                       sort_keys=True))

def main(continuous=False):
    # Experiment reads all arguments from the .ini file and the command line interface (CLI).
    experiment = Experiment()
    train_manager = experiment.train_manager
    experiment_arguments = experiment.experiment_arguments

    # Get the important arguments
    validation_data_type = experiment_arguments.validation_data_type
    run_log_folder = experiment_arguments.run_log_folder

    # If run_log_folder is specified:
    #   a) if it is empty, train from scratch;
    #   b) if it contains an already trained model, try to load it from that directory.
    # If run_log_folder is not specified, initialize a new one and train from scratch.
    if run_log_folder == "":
        experiment_arguments.run_log_folder = train_manager.get_unique_dir()

    while True:
        # Train and save the model
        train_metrics = train_manager.train(experiment_arguments)
        # Validate and save the results
        valid_metrics = train_manager.validate(experiment_arguments)

        # Print for the user
        logger.info('Train Metrics:')
        logger.info(
            json.dumps(train_metrics.get_summarized_results(), indent=2,
                       sort_keys=True))
        logger.info('%s Metrics:' % validation_data_type.title())
        logger.info(
            json.dumps(valid_metrics.get_summarized_results(), indent=2,
                       sort_keys=True))

        if not continuous:
            break

def main():
    # Initialize the wandb config
    # wandb.init(project="GWD", id=config["id"], group="Fold0", config=config,
    #            resume="allow")

    # Create the Experiment
    experiment = Experiment(config)

    # Attach WandB to the Experiment
    # experiment.attach_wandb(wandb)

    # Load a checkpoint
    experiment.load_checkpoint()

    # Run the Experiment
    # import torch
    # checkpoint = experiment.runner.model.state_dict()
    # torch.save(checkpoint, "checkpoint/check14.pth")
    experiment.run()

def __init__(self, directory):
    """
    Construct the trial and its experiment objects.

    :param directory: directory for the trial; the TRIAL_YAML config file
        is expected inside it
    """
    self.directory = directory
    self.config_file = os.path.join(self.directory, TRIAL_YAML)
    if not os.path.isdir(directory):
        logging.fatal(f'trial directory {self.directory} not found')
        exit(1)
    if not os.path.isfile(self.config_file):
        logging.fatal(f'config file {self.config_file} not found')
        exit(1)
    if not vutils.validate_json_doc(self.config_file, TRIAL_SCHEMA):
        logging.fatal(
            f'{self.config_file} failed validation against {TRIAL_SCHEMA}')
    with open(self.config_file) as f:
        self._raw_config = Dict(yaml.load(f, Loader=yaml.FullLoader))
    logging.info(f'loading trial config from {self.config_file}')
    self.trial_id = self._raw_config['trial_id']
    self.trial_desc = self._raw_config['trial_desc']
    self.global_config = self._raw_config['global_config']
    self.regression_test = self._raw_config['regression_test']

    # Create the experiment objects
    self.experiments = []
    for exp in self._raw_config['experiments']:
        self.experiments.append(Experiment(self, exp))

def main():
    parser = get_parser()
    args = parser.parse_args()
    dataset_path = args.dataset_path

    embedding_size = 5
    neg_ratio = 3
    batch_size = 500
    max_iters = 2
    valid_interval = 5
    print_test = False
    checkpoint_file = "temp.pth"
    parameters = Parameters(embedding_size=embedding_size,
                            neg_ratio=neg_ratio,
                            batch_size=batch_size,
                            max_iters=max_iters,
                            valid_interval=valid_interval,
                            print_test=print_test,
                            type_loss="softmargin",
                            margin=1.0)
    experiment = Experiment()
    experiment.construct_experiment(dataset_path, joint=True, typed_corrs=True)

    # ---- testing for vanilla models ----
    # _ = train(experiment=experiment, parameters=parameters,
    #           method='transe_l2', joint=False, run_on_test=False)
    parameters.fact_loss = 'softmax'
    for method in ['transe_l2', 'transe_l1', 'complex', 'simple', 'distmult']:
        _ = train(experiment=experiment, parameters=parameters, method=method,
                  joint=False, run_on_test=False)
        print("Vanilla %s with %s runs! \n" % (method, parameters.fact_loss))

    parameters.fact_loss = 'full-softmax'
    for method in ['complex', 'simple', 'distmult']:
        _ = train(experiment=experiment, parameters=parameters, method=method,
                  joint=False, run_on_test=False)
        print("Vanilla %s with %s runs! \n" % (method, parameters.fact_loss))

    parameters.fact_loss = 'maxmargin'
    for method in ['transe_l1']:
        _ = train(experiment=experiment, parameters=parameters, method=method,
                  joint=False, run_on_test=False)
        print("Vanilla %s with %s runs! \n" % (method, parameters.fact_loss))

    # ---- testing for joint models ----
    parameters.fact_loss = 'softmax'
    parameters.type_loss = 'softmargin'
    for method in ['distmult', 'complex']:
        _ = train(experiment=experiment, parameters=parameters, method=method,
                  joint=True, run_on_test=False)
        print("Joint %s with %s runs! \n" % (method, parameters.fact_loss))

    parameters.fact_loss = 'full-softmax'
    parameters.type_loss = 'softmargin'
    for method in ['distmult', 'complex']:
        _ = train(experiment=experiment, parameters=parameters, method=method,
                  joint=True, run_on_test=False)
        print("Joint %s with %s runs! \n" % (method, parameters.fact_loss))

    # ---- testing for typed negatives ----
    parameters.fact_loss = "softmax"
    parameters.type_loss = "softmargin"
    parameters.type_ratios = [0.8, 0.0, 0.2]
    for method in ['distmult']:
        _ = train(experiment=experiment, parameters=parameters, method=method,
                  joint=True, run_on_test=False)
        print("Joint %s with %s and typed negatives runs! \n" %
              (method, parameters.fact_loss))
    for method in ['distmult']:
        _ = train(experiment=experiment, parameters=parameters, method=method,
                  joint=False, run_on_test=False)
        print("Vanilla %s with %s and typed negatives runs! \n" %
              (method, parameters.fact_loss))

    parameters.fact_loss = "full-softmax"
    parameters.type_loss = "softmargin"
    parameters.type_ratios = [0.8, 0.0, 0.2]
    for method in ['distmult']:
        _ = train(experiment=experiment, parameters=parameters, method=method,
                  joint=True, run_on_test=False)
        print("Joint %s with %s and typed negatives runs! \n" %
              (method, parameters.fact_loss))
    for method in ['distmult']:
        _ = train(experiment=experiment, parameters=parameters, method=method,
                  joint=False, run_on_test=False)
        print("Vanilla %s with %s and typed negatives runs! \n" %
              (method, parameters.fact_loss))

def main():
    numOfAgent = 4
    manipulatedVariables = co.OrderedDict()
    manipulatedVariables['damping'] = [0.0, 0.5]  # [0.0, 1.0]
    manipulatedVariables['frictionloss'] = [1.0]  # [0.0, 0.2, 0.4]
    manipulatedVariables['masterForce'] = [0.0, 1.0]  # [0.0, 2.0]
    chaseTrailVariables = manipulatedVariables.copy()
    catchTrailVariables = manipulatedVariables.copy()
    chaseTrailVariables['hideId'] = [3, 4]  # 0: wolf, 1: sheep, 2: master, 3-4: distractor
    catchTrailVariables['hideId'] = [1]
    chaseTrailconditions = [
        dict(list(specificValueParameter))
        for specificValueParameter in it.product(
            *[[(key, value) for value in values]
              for key, values in chaseTrailVariables.items()])
    ]
    catchTrailconditions = [
        dict(list(specificValueParameter))
        for specificValueParameter in it.product(
            *[[(key, value) for value in values]
              for key, values in catchTrailVariables.items()])
    ]
    conditionsWithId = list(
        zip(range(len(chaseTrailconditions) + len(catchTrailconditions)),
            chaseTrailconditions + catchTrailconditions))
    conditions = chaseTrailconditions + catchTrailconditions
    # dict.update returns None, so the original list comprehension discarded
    # the conditions; update the dicts in place with a plain loop instead.
    for condtionId, condition in enumerate(conditions):
        condition.update({'conditionId': condtionId})

    chaseTrailTrajetoryIndexList = range(2)
    chaseTrailManipulatedVariablesForExp = co.OrderedDict()
    chaseTrailManipulatedVariablesForExp['conditonId'] = range(
        len(chaseTrailconditions))
    chaseTrailManipulatedVariablesForExp['trajetoryIndex'] = \
        chaseTrailTrajetoryIndexList
    chaseTrailProductedValues = it.product(
        *[[(key, value) for value in values]
          for key, values in chaseTrailManipulatedVariablesForExp.items()])

    catchTrailTrajetoryIndexList = range(1)
    catchTrailManipulatedVariablesForExp = co.OrderedDict()
    catchTrailManipulatedVariablesForExp['conditonId'] = range(
        len(chaseTrailconditions),
        len(chaseTrailconditions) + len(catchTrailconditions))
    catchTrailManipulatedVariablesForExp['trajetoryIndex'] = \
        catchTrailTrajetoryIndexList
    catchTrailProductedValues = it.product(
        *[[(key, value) for value in values]
          for key, values in catchTrailManipulatedVariablesForExp.items()])

    exprimentVarableList = [
        dict(list(specificValueParameter))
        for specificValueParameter in chaseTrailProductedValues
    ] + [
        dict(list(specificValueParameter))
        for specificValueParameter in catchTrailProductedValues
    ]
    for exprimentVarable in exprimentVarableList:
        exprimentVarable.update(
            {'condition': conditionsWithId[exprimentVarable['conditonId']][1]})
    print(exprimentVarableList)
    print(len(exprimentVarableList))

    numOfBlock = 1
    numOfTrialsPerBlock = 1
    designValues = createDesignValues(
        exprimentVarableList * numOfTrialsPerBlock, numOfBlock)

    positionIndex = [0, 1]
    FPS = 50
    rawXRange = [-1, 1]
    rawYRange = [-1, 1]
    scaledXRange = [200, 600]
    scaledYRange = [200, 600]
    scaleTrajectoryInSpace = ScaleTrajectory(positionIndex, rawXRange,
                                             rawYRange, scaledXRange,
                                             scaledYRange)
    oldFPS = 50
    numFramesToInterpolate = int(FPS / oldFPS - 1)
    interpolateState = InterpolateState(numFramesToInterpolate)
    scaleTrajectoryInTime = ScaleTrajectoryInTime(interpolateState)

    trajectoriesSaveDirectory = '../PataData/mujoco50Fps4Agent'
    trajectorySaveExtension = '.pickle'
    evaluateEpisode = 120000
    evalNum = 20
    fixedParameters = {
        'distractorNoise': 3.0,
        'evaluateEpisode': evaluateEpisode
    }
    generateTrajectoryLoadPath = GetSavePath(trajectoriesSaveDirectory,
                                             trajectorySaveExtension,
                                             fixedParameters)
    trajectoryDf = lambda condition: pd.read_pickle(
        generateTrajectoryLoadPath({
            'hideId': condition['hideId'],
            'damping': condition['damping'],
            'frictionloss': condition['frictionloss'],
            'masterForce': condition['masterForce'],
            'evalNum': evalNum,
        }))
    getTrajectory = lambda trajectoryDf: scaleTrajectoryInTime(
        scaleTrajectoryInSpace(trajectoryDf))
    # getTrajectory = lambda trajectoryDf: scaleTrajectoryInSpace(trajectoryDf)

    print('loading')
    stimulus = {
        conditionId: getTrajectory(trajectoryDf(condition))
        for conditionId, condition in conditionsWithId
    }
    print(stimulus[0][0][0])
    print(stimulus[10][0][0])
    print('loading success')
    # print(stimulus[1][1])

    experimentValues = co.OrderedDict()
    experimentValues["name"] = input("Please enter your name:").capitalize()

    screenWidth = 800
    screenHeight = 800
    fullScreen = False
    initializeScreen = InitializeScreen(screenWidth, screenHeight, fullScreen)
    screen = initializeScreen()
    leaveEdgeSpace = 200
    circleSize = 10
    clickImageHeight = 80
    lineWidth = 3
    fontSize = 50
    xBoundary = [leaveEdgeSpace, screenWidth - leaveEdgeSpace * 2]
    yBoundary = [leaveEdgeSpace, screenHeight - leaveEdgeSpace * 2]
    screenColor = THECOLORS['black']
    lineColor = THECOLORS['white']
    textColor = THECOLORS['white']
    fixationPointColor = THECOLORS['white']
    colorSpace = [(203, 164, 4, 255), (49, 153, 0, 255), (255, 90, 16, 255),
                  (251, 7, 255, 255), (9, 204, 172, 255), (3, 28, 255, 255)]

    picturePath = os.path.join(
        os.path.abspath(os.path.join(os.getcwd(), os.pardir)), 'pictures')
    resultsPath = os.path.join(
        os.path.abspath(os.path.join(os.getcwd(), os.pardir)), 'results')
    introductionImage1 = pygame.image.load(
        os.path.join(picturePath, 'mujocoIntro1.png'))
    introductionImage2 = pygame.image.load(
        os.path.join(picturePath, 'mujocoIntro2.png'))
    finishImage = pygame.image.load(os.path.join(picturePath, 'over.jpg'))
    introductionImage1 = pygame.transform.scale(introductionImage1,
                                                (screenWidth, screenHeight))
    introductionImage2 = pygame.transform.scale(introductionImage2,
                                                (screenWidth, screenHeight))
    finishImage = pygame.transform.scale(
        finishImage, (int(screenWidth * 2 / 3), int(screenHeight / 4)))
    clickWolfImage = pygame.image.load(
        os.path.join(picturePath, 'clickwolf.png'))
    clickSheepImage = pygame.image.load(
        os.path.join(picturePath, 'clicksheep.png'))
    restImage = pygame.image.load(os.path.join(picturePath, 'rest.jpg'))

    drawImage = DrawImage(screen)
    drawText = DrawText(screen, fontSize, textColor)
    drawBackGround = DrawBackGround(screen, screenColor, xBoundary, yBoundary,
                                    lineColor, lineWidth)
    drawFixationPoint = DrawFixationPoint(screen, drawBackGround,
                                          fixationPointColor)
    drawImageClick = DrawImageClick(screen, clickImageHeight, drawText)
    drawState = DrawState(screen, circleSize, numOfAgent, positionIndex,
                          drawBackGround)
    # drawStateWithRope = DrawStateWithRope(screen, circleSize, numOfAgent,
    #                                       positionIndex, ropeColor,
    #                                       drawBackground)

    writerPath = os.path.join(resultsPath, experimentValues["name"]) + '.csv'
    writer = WriteDataFrameToCSV(writerPath)
    displayFrames = 500
    keysForCheck = {'f': 0, 'j': 1}
    checkHumanResponse = CheckHumanResponse(keysForCheck)
    trial = ChaseTrialMujoco(conditionsWithId, displayFrames, drawState,
                             drawImage, stimulus, checkHumanResponse,
                             colorSpace, numOfAgent, drawFixationPoint,
                             drawText, drawImageClick, clickWolfImage,
                             clickSheepImage, FPS)
    experiment = Experiment(trial, writer, experimentValues, drawImage,
                            restImage, drawBackGround)
    restDuration = 20
    drawImage(introductionImage1)
    drawImage(introductionImage2)
    experiment(designValues, restDuration)
    drawImage(finishImage)
    print("Result saved at {}".format(writerPath))

def main():
    numOfAgent = 4
    manipulatedHyperVariables = co.OrderedDict()
    conditionList = [1, 2]  # 0: practice, 1: off=0, 2: off=12
    manipulatedHyperVariables['ChaseCondition'] = conditionList
    trajetoryIndexList = range(20)
    manipulatedHyperVariables['TrajIndex'] = trajetoryIndexList
    exprimentVarableList = crateVariableProduct(manipulatedHyperVariables)

    print('loading')
    positionIndex = [0, 1]
    FPS = 60
    dataFileDir = '../PataData/withoutLine'
    rawXRange = [-10, 10]
    rawYRange = [-10, 10]
    scaledXRange = [200, 600]
    scaledYRange = [200, 600]
    scaleTrajectory = ScaleTrajectory(positionIndex, rawXRange, rawYRange,
                                      scaledXRange, scaledYRange)
    oldFPS = 5
    adjustFPS = AdjustDfFPStoTraj(oldFPS, FPS)
    getTrajectory = lambda trajectoryDf: scaleTrajectory(
        adjustFPS(trajectoryDf))
    trajectoryDf = lambda condition, index: pd.read_pickle(
        os.path.join(dataFileDir,
                     '{}'.format(condition) + ' ({}).pickle'.format(index)))
    stimulus = {
        condition: [
            getTrajectory(trajectoryDf(condition, index))
            for index in trajetoryIndexList
        ]
        for condition in conditionList
    }
    print('loading success')

    experimentValues = co.OrderedDict()
    experimentValues["name"] = input("Please enter your name:").capitalize()

    screenWidth = 800
    screenHeight = 800
    fullScreen = True
    initializeScreen = InitializeScreen(screenWidth, screenHeight, fullScreen)
    screen = initializeScreen()
    leaveEdgeSpace = 200
    circleSize = 10
    clickImageHeight = 80
    lineWidth = 3
    fontSize = 50
    xBoundary = [leaveEdgeSpace, screenWidth - leaveEdgeSpace * 2]
    yBoundary = [leaveEdgeSpace, screenHeight - leaveEdgeSpace * 2]
    screenColor = THECOLORS['black']
    lineColor = THECOLORS['white']
    textColor = THECOLORS['white']
    fixationPointColor = THECOLORS['white']
    colorSpace = [(203, 164, 4, 255), (49, 153, 0, 255), (255, 90, 16, 255),
                  (251, 7, 255, 255), (9, 204, 172, 255), (3, 28, 255, 255)]

    picturePath = os.path.join(
        os.path.abspath(os.path.join(os.getcwd(), os.pardir)), 'pictures')
    resultsPath = os.path.join(
        os.path.abspath(os.path.join(os.getcwd(), os.pardir)), 'results')
    introductionImage1 = pygame.image.load(
        os.path.join(picturePath, 'introduction1.png'))
    introductionImage2 = pygame.image.load(
        os.path.join(picturePath, 'introduction2.png'))
    finishImage = pygame.image.load(os.path.join(picturePath, 'over.jpg'))
    introductionImage1 = pygame.transform.scale(introductionImage1,
                                                (screenWidth, screenHeight))
    introductionImage2 = pygame.transform.scale(introductionImage2,
                                                (screenWidth, screenHeight))
    finishImage = pygame.transform.scale(
        finishImage, (int(screenWidth * 2 / 3), int(screenHeight / 4)))
    clickWolfImage = pygame.image.load(
        os.path.join(picturePath, 'clickwolf.png'))
    clickSheepImage = pygame.image.load(
        os.path.join(picturePath, 'clicksheep.png'))
    restImage = pygame.image.load(os.path.join(picturePath, 'rest.jpg'))

    drawImage = DrawImage(screen)
    drawText = DrawText(screen, fontSize, textColor)
    drawBackGround = DrawBackGround(screen, screenColor, xBoundary, yBoundary,
                                    lineColor, lineWidth)
    drawFixationPoint = DrawFixationPoint(screen, drawBackGround,
                                          fixationPointColor)
    drawImageClick = DrawImageClick(screen, clickImageHeight, drawText)
    drawState = DrawState(screen, circleSize, numOfAgent, positionIndex,
                          drawBackGround)
    # drawStateWithRope = DrawStateWithRope(screen, circleSize, numOfAgent,
    #                                       positionIndex, ropeColor,
    #                                       drawBackground)

    writerPath = os.path.join(resultsPath, experimentValues["name"]) + '.csv'
    writer = WriteDataFrameToCSV(writerPath)
    displayFrames = 600
    keysForCheck = {'f': 0, 'j': 1}
    checkHumanResponse = CheckHumanResponse(keysForCheck)
    trial = ChaseTrial(conditionList, displayFrames, drawState, drawImage,
                       stimulus, checkHumanResponse, colorSpace, numOfAgent,
                       drawFixationPoint, drawText, drawImageClick,
                       clickWolfImage, clickSheepImage, FPS)
    experiment = Experiment(trial, writer, experimentValues, drawImage,
                            restImage, drawBackGround)
    numOfBlock = 1
    numOfTrialsPerBlock = 1
    designValues = createDesignValues(
        exprimentVarableList * numOfTrialsPerBlock, numOfBlock)
    restDuration = 20
    drawImage(introductionImage1)
    drawImage(introductionImage2)
    experiment(designValues, restDuration)
    drawImage(finishImage)
    print("Result saved at {}".format(writerPath))

valid_flatten = (valid_flatten - train_mean) / train_std
early_stop = 10
experiments_config = {
    'nb_epochs': args.nb_epochs,
    'batch_size': args.batch_size,
    'margin': args.margin,
    'random_state': args.random_state,
    'cuda': args.cuda,
    'train_mean': [float(t) for t in train_mean],
    'train_std': [float(t) for t in train_std],
    'early_stop': early_stop,
}
xp = Experiment(experiments_config, args.results_path)
print(experiments_config)

model = PrefNet(nb_obj=nb_obj)
min_model = np.min(model.predict(train_flatten))
max_model = np.max(model.predict(train_flatten))
# Re-sample the network until its initial predictions span a usable range.
while (max_model - min_model) < 0.1:
    model = PrefNet(nb_obj=nb_obj)
    min_model = np.min(model.predict(train_flatten))
    max_model = np.max(model.predict(train_flatten))
print(' [-] min_model {:.3f}, max_model {:.3f}'.format(min_model, max_model))

best_weights, record = train(model, train_flatten, \

info_config = {
    'calc_performance_every': 1,
    'filename': filename,
    'tensorboard': {
        'enabled': False,
        'path': '../tb/' + filename,
        'period': 200,
        'graph': False,
        'weights': True,
        'gradients': True
    },
    'profiling': {
        'enabled': False,
        'path': '../profiling/' + filename
    },
    'record_metrics': ['tr', 'va', 'te'],
    'record_output': [],
    'record_dataset': ['te']
}
result_config = {
    'save_results': True,
    'path': '../numerical_results/' + filename + '_' + str(task_id),
    'plot_results': False,
    'print_final_stats': True
}
candidate_config = {'period': 10000000}

nn_datas = []
transfer_idcs = np.load('../numerical_results/' + filename_old + '_' +
                        str(task_id) + '_transferidcs.npy')
tr_idcs = np.load('../numerical_results/' + filename_old + '_' +
                  str(task_id) + '_tr_idcs.npy')
tr_set_idcs = np.concatenate([transfer_idcs, tr_idcs], axis=1)

for run in range(runs):
    experiment = Experiment(data_config, candidate_config, info_config)
    nn_data, u = experiment.train(transfer_idc=tr_set_idcs[run, :],
                                  nn_config=nn_config,
                                  train_config=train_config,
                                  run=run)
    nn_datas.append(nn_data)

if result_config['save_results']:
    save_to_file(nn_datas, result_config['path'])

def test_p_pattern():
    e = Experiment()
    assert e.p_pattern('HHTHTT') == approx(1 / (2**6), rel=1e-2)
    assert e.p_pattern('HHTHTTHH') == approx(1 / (2**8), rel=1e-2)

def test_p_2_heads():
    e = Experiment()
    assert e.p_2_heads() == approx(0.25, rel=1e-2)

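# A minimal analytic sketch consistent with the two tests above, assuming a
# fair coin; the real Experiment class may instead estimate these values by
# simulation, which is why the tests use approx with a relative tolerance.
class Experiment:
    def p_pattern(self, pattern: str) -> float:
        # Each of the n independent fair flips matches with probability 1/2,
        # so a specific length-n pattern has probability (1/2)**n.
        return 0.5 ** len(pattern)

    def p_2_heads(self) -> float:
        # Two heads in two flips: (1/2) * (1/2) = 0.25.
        return self.p_pattern('HH')
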
        'out_seq_len': [1, 1, 2, 4, 12],
        'zero_padding': [0, 0, 0, 2, 2],
        'min_errors': [3.5, 3.5, 3, 2, 0.1],
        'max_epochs': [20, 20, 20, 20, 20]
    },
    'task_id': task_id
}
info_config = {
    'calc_performance_every': 1,
    'include_pred': False,
    'include_out': False
}
result_config = {
    'save_results': False,
    'filename': '../numerical_results/test',
    'plot_results': False,
    'print_final_stats': True
}

result_dicts = []
for run in range(runs):
    experiment = Experiment()
    result_dicts.append(
        experiment.train(rnn_config, labelled_data_config, training_config,
                         info_config))
print('----------------------------')
print(result_dicts[0])
process_results(result_config, result_dicts)

if test is not None:
    # The percentage should be computed from the test split, not the
    # validation split (fixed from len(valid_data) in the original).
    print(' [-]: using {} testing examples (~{}%)'.format(
        len(test_data), int(100 * len(test_data) / len(data))))
else:
    print(' [-]: using no testing examples (0%)')

## Normalization
mean, std = np.mean(train_data), np.std(train_data)
config['mean'], config['std'] = float(mean), float(std)
train_data = normalize_data(train_data, mean, std)
valid_data = normalize_data(valid_data, mean, std)
if test is not None:
    test_data = normalize_data(test_data, mean, std)

## Set up the environment folder
xp = Experiment(config)

## Save the split histogram
if test is not None:
    plot_split_histogram(train_targets, valid_targets, test_targets,
                         rootdir=xp.rootdir())

## Data augmentation
train_data, train_targets = data_augmentation(train_data, train_targets)
if args.verbose:
    print(' [-]: data augmentation to {} training examples'.format(
        len(train_data)))
    print()

P_LABELS = Config.get("Default", "P_LABELS")
P_PIXEL_MASK = Config.get("Default", "P_PIXEL_MASK")
METHOD = Config.get("Default", "METHOD")

ts = time.time()
dt = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S')
result_path = os.path.join("results", "archive", METHOD, dt)
os.makedirs(result_path)
logging.basicConfig(filename=os.path.join(result_path, "info.log"),
                    level=logging.INFO)
model_path = os.path.join("models", METHOD, dt)
os.makedirs(model_path)

if METHOD == 'STAE':
    net = SpatialTemporalAutoencoder(tvol=TVOL, alpha=ALPHA,
                                     batch_size=BATCH_SIZE, lambd=LAMBDA)
    d = DataIteratorStae(P_TRAIN, P_TEST, P_LABELS, P_PIXEL_MASK,
                         batch_size=BATCH_SIZE, tvol=TVOL, taug=TAUG)
elif METHOD == 'CONVAE2D':
    net = ConvAE2d(tvol=TVOL, alpha=ALPHA, batch_size=BATCH_SIZE,
                   lambd=LAMBDA)
    d = DataIteratorNormal(P_TRAIN, P_TEST, P_LABELS, P_PIXEL_MASK,
                           batch_size=BATCH_SIZE, tvol=TVOL, taug=TAUG)
elif METHOD == 'EXP':
    net = Experiment(tvol=TVOL, alpha=ALPHA, batch_size=BATCH_SIZE,
                     lambd=LAMBDA)
    d = DataIteratorNormal(P_TRAIN, P_TEST, P_LABELS, P_PIXEL_MASK,
                           batch_size=BATCH_SIZE, tvol=TVOL, taug=TAUG)
else:
    raise ValueError('Incorrect method specification')

frame_auc, frame_eer, pixel_auc, pixel_eer = train(data=d, model=net,
                                                   num_iteration=NUM_ITER,
                                                   result_path=result_path,
                                                   model_path=model_path)
logging.info("Best frame-level area under the ROC curve: {0:g}".format(frame_auc))
logging.info("Frame-level equal error rate corresponding to this: {0:g}".format(frame_eer))
logging.info("Pixel-level area under the ROC curve corresponding to this: {0:g}".format(pixel_auc))
logging.info("Pixel-level equal error rate corresponding to this: {0:g}".format(pixel_eer))

# The fragment began mid-call; the STAE branch header is reconstructed from
# the symmetric branches below and the dispatch in the previous snippet.
if METHOD == 'STAE':
    d = DataIteratorStae(data_dir=DATA_DIR, ext=EXT, batch_size=BATCH_SIZE,
                         tvol=TVOL)
    net = SpatialTemporalAutoencoder(data=d, alpha=ALPHA, lambd=LAMBDA)
elif METHOD == 'CONVAE2D':
    d = DataIteratorNormal(data_dir=DATA_DIR, ext=EXT, batch_size=BATCH_SIZE,
                           tvol=TVOL)
    net = ConvAE2d(data=d, alpha=ALPHA, lambd=LAMBDA)
elif METHOD == 'EXP':
    d = DataIteratorNormal(data_dir=DATA_DIR, ext=EXT, batch_size=BATCH_SIZE,
                           tvol=TVOL)
    net = Experiment(data=d, alpha=ALPHA, lambd=LAMBDA)
else:
    raise ValueError('Incorrect method specification')

frame_auc, frame_eer = train(model=net, num_iteration=NUM_ITER,
                             data_dir=DATA_DIR, ext=EXT,
                             frame_gt_path=FRAME_GT_PATH,
                             result_path=result_path, model_path=model_path)
logging.info(
    "Best frame-level area under the ROC curve: {0:g}".format(frame_auc))
logging.info(
    "Frame-level equal error rate corresponding to this: {0:g}".format(
        frame_eer))

def main():
    screenWidth = 800
    screenHeight = 800
    FPS = 60
    fullScreen = False
    initializeScreen = InitializeScreen(screenWidth, screenHeight, fullScreen)
    screen = initializeScreen()
    saveImage = False
    saveImageFile = 'noRopeCondition1'
    numOfAgent = 4
    leaveEdgeSpace = 200
    circleSize = 10
    clickImageHeight = 80
    lineWidth = 3
    fontSize = 50
    xBoundary = [leaveEdgeSpace, screenWidth - leaveEdgeSpace * 2]
    yBoundary = [leaveEdgeSpace, screenHeight - leaveEdgeSpace * 2]
    stimulusXBoundary = [xBoundary[0] + circleSize, xBoundary[1] - circleSize]
    stimulusYBoundary = [yBoundary[0] + circleSize, yBoundary[1] - circleSize]
    screenColor = THECOLORS['black']
    lineColor = THECOLORS['white']
    textColor = THECOLORS['white']
    fixationPointColor = THECOLORS['white']
    ropeColor = THECOLORS['white']
    colorSpace = [
        THECOLORS['grey'], THECOLORS['red'], THECOLORS['blue'],
        THECOLORS['yellow'], THECOLORS['purple'], THECOLORS['orange']
    ]
    random.shuffle(colorSpace)
    # circleColorList = [THECOLORS['grey']] * numOfAgent
    # circleColorList = [THECOLORS['red'], THECOLORS['green'],
    #                    THECOLORS['grey'], THECOLORS['yellow']]
    circleColorList = colorSpace[:numOfAgent]
    stateIndex = ['wolf', 'sheep', 'master', 'distractor']
    identityColorPairs = dict(zip(stateIndex, circleColorList))

    picturePath = os.path.join(
        os.path.abspath(os.path.join(os.getcwd(), os.pardir)), 'pictures')
    resultsPath = os.path.join(
        os.path.abspath(os.path.join(os.getcwd(), os.pardir)), 'results')
    introductionImage = pygame.image.load(
        os.path.join(picturePath, 'introduction2.png'))
    finishImage = pygame.image.load(os.path.join(picturePath, 'over.jpg'))
    introductionImage = pygame.transform.scale(introductionImage,
                                               (screenWidth, screenHeight))
    finishImage = pygame.transform.scale(
        finishImage, (int(screenWidth * 2 / 3), int(screenHeight / 4)))
    clickWolfImage = pygame.image.load(
        os.path.join(picturePath, 'clickwolf.png'))
    clickSheepImage = pygame.image.load(
        os.path.join(picturePath, 'clicksheep.png'))
    restImage = pygame.image.load(os.path.join(picturePath, 'rest.jpg'))
    # restImage = pygame.transform.scale(restImage, (screenWidth, screenHeight))

    drawImage = DrawImage(screen)
    drawText = DrawText(screen, fontSize, textColor)
    drawBackGround = DrawBackGround(screen, screenColor, xBoundary, yBoundary,
                                    lineColor, lineWidth)
    drawFixationPoint = DrawFixationPoint(screen, drawBackGround,
                                          fixationPointColor)
    drawImageClick = DrawImageClick(screen, clickImageHeight, drawText)
    drawState = DrawState(drawBackGround, numOfAgent, screen, circleSize)
    drawStateWithRope = DrawStateWithRope(drawBackGround, numOfAgent, screen,
                                          circleSize, ropeColor)

    conditionList = [1]  # [1, 2, 3, 4]
    trajetoryIndexList = [1]  # [1, 2, 3, 4, 5]
    dataFileDir = '../PataData'
    dataSetBoundary = [26, 26]
    generateTrajetoryData = GenerateTrajetoryData(dataFileDir,
                                                  stimulusXBoundary,
                                                  stimulusYBoundary,
                                                  dataSetBoundary)
    stimulus = {
        condition: generateTrajetoryData(condition, trajetoryIndexList)
        for condition in conditionList
    }

    experimentValues = co.OrderedDict()
    # experimentValues["name"] = input("Please enter your name:").capitalize()
    experimentValues["name"] = 'csz'
    writerPath = os.path.join(resultsPath, experimentValues["name"]) + '.csv'
    writer = WriteDataFrameToCSV(writerPath)
    displayFrames = FPS * 3
    keysForCheck = {'f': 0, 'j': 1}
    checkHumanResponse = CheckHumanResponse(keysForCheck)
    trial = ChaseTrial(displayFrames, drawState, drawImage, stimulus,
                       checkHumanResponse, colorSpace, numOfAgent,
                       drawFixationPoint, drawText, drawImageClick,
                       clickWolfImage, clickSheepImage, FPS, saveImage,
                       saveImageFile)
    experiment = Experiment(trial, writer, experimentValues, drawImage,
                            restImage)
    numOfBlock = 2
    numOfTrialsPerBlock = 1
    designValues = createDesignValues(conditionList * numOfTrialsPerBlock,
                                      numOfBlock)
    print(designValues)
    drawImage(introductionImage)
    experiment(designValues, numOfTrialsPerBlock)
    # drawImage(finishImage)
    print("Result saved at {}".format(writerPath))