Code Example #1
File: higgs.py Project: arindamrc/distributed-nn
def main():
    np.random.seed(87655678)

    numberOfNodes   = 1 
    batch_size      = 100
    inputStream     = inputs.higgs.HiggsStream("/home/arc/Desktop/shared/Uni Bonn Study Materials/Data Science Lab/HIGGS.csv", "higgs", batch_size=batch_size)
    lossFunction    = SquaredLoss()    
    learningRate    = 0.1
    lmbda           = 0.0001
    sigma           = 4.5
    kernel          = GaussianKernel(sigma)
    noModelComp     = NoCompression()
    modelComp       = Projection(0.1, kernel)
    linupdateRule   = StochasticGradientDescent(lossFunction, lmbda, learningRate)
    updateRule      = KernelStochasticGradientDescent(lossFunction, lmbda, learningRate)
    anchorBatchSize = 20
    linmodel        = LinearClassificationModel()
    model           = KernelClassificationModel(kernel, noModelComp)  
    modelComp       = KernelClassificationModel(kernel, modelComp)  
    dnnModel        = DNNModel(indim = 28, outdim = 1, hidden_layers = 5, neurons = 300, batch_size=100)

    envs    = [
               #baselines
                PredictionEnvironment(   numberOfNodes   = numberOfNodes,
                                         updateRule      = linupdateRule,
                                         model           = dnnModel,
                                         batchSizeInMacroRounds  = numberOfNodes*500,
                                         syncOperator    = NoSyncOperator()),
#                 PredictionEnvironment(   numberOfNodes   = numberOfNodes,
#                                          updateRule      = linupdateRule,
#                                          model           = dnnModel,
#                                          syncOperator    = CentralSyncOperator(),
#                                          serial          = True),
#                 PredictionEnvironment(   numberOfNodes   = numberOfNodes,
#                                          updateRule      = linupdateRule,
#                                          model           = dnnModel,
#                                          syncOperator    = CentralSyncOperator(),
#                                          serial          = False),
#                 #static averaging
#                 PredictionEnvironment(   numberOfNodes           = numberOfNodes,
#                                          updateRule              = linupdateRule,
#                                          model                   = dnnModel,
#                                          batchSizeInMacroRounds  = numberOfNodes*500,
#                                          syncOperator            = StaticSyncOperator()), 
#                 #dynamic averaging    
#                 PredictionEnvironment(   numberOfNodes           = numberOfNodes,
#                                          updateRule              = linupdateRule,
#                                          model                   = dnnModel,
#                                          batchSizeInMacroRounds  = 5000,
#                                          syncOperator            = HedgedDistBaseSync(1.0),),                          
               ]

    

    #experiment.runParameterEvaluation(inputStream, envs, numberOfNodes*100)
    experiment.run(inputStream, envs, MaxNumberOfExamplesCondition(4*250000/batch_size))   
Code Example #2
File: node.py Project: vladum/tribler-experiments
def stage_master_main():
    context = zmq.Context()
    socket = context.socket(zmq.REP)
    socket.bind('tcp://*:' + STAGE_MASTER_PORT)
    i = 0
    settings = []
    while i < get_nodes_no() - 1:
        s = eval(socket.recv())
        settings += [s]
        print 'Received settings:', s
        socket.send('ACK')
        i += 1
    print 'Got all peer settings:', settings, '. Starting experiment.'
    run(settings)
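A matching peer-side sender is implied but not shown here; a minimal sketch, assuming the master's hostname is known to each peer and that the settings object is a plain dict whose repr() round-trips through the eval() above (the port constant below is hypothetical and must match whatever the master binds):

import zmq

STAGE_MASTER_PORT = '5556'  # assumed value; reuse the same constant as the master

def send_settings(master_host, settings):
    context = zmq.Context()
    socket = context.socket(zmq.REQ)          # REQ pairs with the master's REP socket
    socket.connect('tcp://{}:{}'.format(master_host, STAGE_MASTER_PORT))
    socket.send_string(repr(settings))        # the master reconstructs this with eval()
    reply = socket.recv_string()              # the master answers 'ACK'
    socket.close()
    return reply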
Code Example #3
def main():
    args = get_args()
    _setup_logging()

    # If job_dir_reuse is False then remove the job_dir if it exists
    logging.info("Resume training:", args.reuse_job_dir)
    if not args.reuse_job_dir:
        if tf.gfile.Exists(args.job_dir):
            tf.gfile.DeleteRecursively(args.job_dir)
            logging.info("Deleted job_dir {} to avoid re-use".format(
                args.job_dir))
    else:
        logging.info("Reusing job_dir {} if it exists".format(args.job_dir))

    run_config = experiment.create_run_config(args)
    logging.info("Job directory:", run_config.model_dir)

    # Compute the number of training steps
    if args.train_size is not None and args.num_epochs is not None:
        args.train_steps = int(
            (args.train_size / args.batch_size) * args.num_epochs)
    else:
        args.train_steps = args.train_steps

    logging.info("Train size: {}.".format(args.train_size))
    logging.info("Epoch count: {}.".format(args.num_epochs))
    logging.info("Batch size: {}.".format(args.batch_size))
    logging.info("Training steps: {} ({}).".format(
        args.train_steps,
        "supplied" if args.train_size is None else "computed"))
    logging.info("Evaluate every {} steps.".format(args.eval_frequency_secs))

    # Create the estimator
    estimator = model.create(args, run_config)
    logging.info("creating an estimator: {}".format(type(estimator)))

    # Run the train and evaluate experiment
    time_start = datetime.utcnow()
    logging.info("Experiment started...")
    logging.info(".......................................")

    # Run experiment
    experiment.run(estimator, args)

    time_end = datetime.utcnow()
    logging.info(".......................................")
    logging.info("Experiment finished.")
    time_elapsed = time_end - time_start
    logging.info("Experiment elapsed time: {} seconds".format(
        time_elapsed.total_seconds()))
Code Example #4
File: task.py Project: zhang01GA/cloudml-samples
def main():
  args = get_args()
  _setup_logging()

  # If job_dir_reuse is False then remove the job_dir if it exists
  logging.info("Resume training:", args.reuse_job_dir)
  if not args.reuse_job_dir:
    if tf.gfile.Exists(args.job_dir):
      tf.gfile.DeleteRecursively(args.job_dir)
      logging.info("Deleted job_dir {} to avoid re-use".format(args.job_dir))
  else:
    logging.info("Reusing job_dir {} if it exists".format(args.job_dir))

  run_config = experiment.create_run_config(args)
  logging.info("Job directory:", run_config.model_dir)

  # Compute the number of training steps
  if args.train_size is not None and args.num_epochs is not None:
    args.train_steps = int(
      (args.train_size / args.batch_size) * args.num_epochs)
  else:
    args.train_steps = args.train_steps

  logging.info("Train size: {}.".format(args.train_size))
  logging.info("Epoch count: {}.".format(args.num_epochs))
  logging.info("Batch size: {}.".format(args.batch_size))
  logging.info("Training steps: {} ({}).".format(
    args.train_steps, "supplied" if args.train_size is None else "computed"))
  logging.info("Evaluate every {} steps.".format(args.eval_frequency_secs))

  # Create the estimator
  estimator = model.create(args, run_config)
  logging.info("creating an estimator: {}".format(type(estimator)))

  # Run the train and evaluate experiment
  time_start = datetime.utcnow()
  logging.info("Experiment started...")
  logging.info(".......................................")

  # Run experiment
  experiment.run(estimator, args)

  time_end = datetime.utcnow()
  logging.info(".......................................")
  logging.info("Experiment finished.")
  time_elapsed = time_end - time_start
  logging.info(
    "Experiment elapsed time: {} seconds".format(time_elapsed.total_seconds()))
Code Example #5
def main():
    print("Loading models...")
    with open(GRAPH_PATH, 'r') as f:
        G = pickle.load(f)
    with open(ETA_MODEL_PATH, 'r') as f:
        eta_model = pickle.load(f)
    num_fleets = NUM_FLEETS

    geohash_table = pd.read_csv(GEOHASH_TABLE_PATH, index_col='geohash')

    env = FleetSimulator(G, eta_model, CYCLE, ACTION_UPDATE_CYCLE)
    agent = Agent(geohash_table,
                  CYCLE,
                  ACTION_UPDATE_CYCLE,
                  DEMAND_FORECAST_INTERVAL,
                  training=True,
                  load_network=LOAD_NETWORK)
    if INITIAL_MEMORY:
        with open(INITIAL_MEMORY_PATH, 'r') as f:
            ex_memory = pickle.load(f)
        agent.init_train(3000, ex_memory)

    trip_chunks = load_trip_chunks(TRIP_PATH, NUM_TRIPS,
                                   DURATION)[:NUM_EPISODES]
    for episode, (trips, date, dayofweek, minofday) in enumerate(trip_chunks):
        # num_fleets = int(np.sqrt(len(trips)/120000.0) * NUM_FLEETS)
        env.reset(num_fleets, trips, dayofweek, minofday)
        _, requests, _, _, _ = env.step()
        agent.reset(requests, env.dayofweek, env.minofday)
        num_steps = DURATION / CYCLE - NO_OP_STEPS

        print(
            "#############################################################################"
        )
        print(
            "EPISODE: {:d} / DATE: {:d} / DAYOFWEEK: {:d} / MINUTES: {:d} / VEHICLES: {:d}"
            .format(episode, date, env.dayofweek, env.minofday, num_fleets))
        score, _ = run(env,
                       agent,
                       num_steps,
                       average_cycle=AVERAGE_CYCLE,
                       cheat=True)
        describe(score)
        score.to_csv(SCORE_PATH + 'score_dqn' + str(episode) + '.csv')

        if episode > 0 and episode % 10 == 0:
            print("Saving Experience Memory: {:d}").format(episode)
            with open(SCORE_PATH + 'ex_memory' + str(episode) + '.pkl',
                      'wb') as f:
                pickle.dump(agent.replay_memory, f)
Code Example #6
File: eval.py Project: vkjdev/FleetAI
def main():

    print("Loading models...")
    with open(GRAPH_PATH, 'r') as f:
        G = pickle.load(f)
    with open(ETA_MODEL_PATH, 'r') as f:
        eta_model = pickle.load(f)

    # with open(DEMAND_MODEL_PATH + 'model.json', 'r') as f:
    #     demand_model = f.read()
    # demand_model = model_from_json(demand_model)
    # demand_model.load_weights(DEMAND_MODEL_PATH + 'model.h5')

    geohash_table = pd.read_csv(GEOHASH_TABLE_PATH, index_col='geohash')
    # eta_table = pd.read_csv(ETA_TABLE_PATH, index_col=['dayofweek', 'hour', 'pickup_zone'])
    # pdest_table = pd.read_csv(PDEST_TABLE_PATH, index_col=['dayofweek', 'hour', 'pickup_zone'])

    env = FleetSimulator(G, eta_model, CYCLE, ACTION_UPDATE_CYCLE)
    # agent = Agent(geohash_table, eta_table, pdest_table, demand_model, CYCLE)
    agent = Agent(geohash_table,
                  CYCLE,
                  ACTION_UPDATE_CYCLE,
                  30,
                  training=False,
                  load_network=True)

    trip_chunks = load_trip_eval(TRIP_PATH, NUM_TRIPS)
    for episode, (trips, date, dayofweek, minofday) in enumerate(trip_chunks):
        env.reset(NUM_FLEETS, trips, dayofweek, minofday)
        print(
            "#############################################################################"
        )
        print("EPISODE: {:d} / DATE: {:d} / DAYOFWEEK: {:d} / MINUTES: {:d}".
              format(episode, date, env.dayofweek, env.minofday))
        score, vscore = run(env,
                            agent,
                            NUM_STEPS,
                            no_op_steps=30,
                            average_cycle=30)
        describe(score)
        score.to_csv(SCORE_PATH + 'score' + str(dayofweek) + '.csv',
                     index=False)
        vscore.to_csv(SCORE_PATH + 'vscore' + str(dayofweek) + '.csv',
                      index=False)
Code Example #7
def main():
    """Main function"""
    if len(argv) != 5:
        parameter_error_and_exit()

    try:
        board_size = int(argv[1])
        start_row = int(argv[2])
        start_col = int(argv[3])
        generation_max = int(argv[4])
    except (ValueError):
        parameter_error_and_exit()

    print("Running on a {0}x{0} board".format(board_size))
    print("With knight starting at ({},{})".format(start_row, start_col))
    print("and maximum generations allowed: {}".format(generation_max))

    best_path_found = experiment.run(board_size, start_row, start_col,
                                     generation_max)

    print("Output: {}".format(best_path_found))
Code Example #8
File: test_dqn.py Project: vkjdev/FleetAI
def main():
    print("Loading models...")
    with open(GRAPH_PATH, 'r') as f:
        G = pickle.load(f)
    with open(ETA_MODEL_PATH, 'r') as f:
        eta_model = pickle.load(f)
    geohash_table = pd.read_csv(GEOHASH_TABLE_PATH, index_col='geohash')

    env = FleetSimulator(G, eta_model, CYCLE, ACTION_UPDATE_CYCLE)
    agent = Agent(geohash_table,
                  CYCLE,
                  ACTION_UPDATE_CYCLE,
                  training=False,
                  load_network=True)

    trip_chunks = ex.load_trip_eval(TRIP_PATH, NUM_TRIPS,
                                    DURATION)[:NUM_EPISODES]
    for episode, (trips, date, dayofweek, minofday) in enumerate(trip_chunks):
        if minofday < 60 * 6:
            num_fleets = NUM_FLEETS_MN
        else:
            num_fleets = NUM_FLEETS

        env.reset(num_fleets, trips, dayofweek, minofday)
        _, requests, _, _, _ = env.step()
        for _ in range(NO_OP_STEPS - 1):
            _, requests_, _, _, _ = env.step()
            requests = requests.append(requests_)
        agent.reset(requests, env.dayofweek, env.minofday)

        print(
            "#############################################################################"
        )
        print(
            "EPISODE: {:d} / DATE: {:d} / DAYOFWEEK: {:d} / MINUTES: {:d} / VEHICLES: {:d}"
            .format(episode, date, env.dayofweek, env.minofday, num_fleets))
        score = ex.run(env, agent, NUM_STEPS, average_cycle=60)
        ex.describe(score)
        score.to_csv(SCORE_PATH + 'score_lp' + str(date) + '-' +
                     str(minofday / 60) + '.csv')
Code Example #9
File: FRQ.py Project: RicoJia/Machine_Learning
def frq_9():
    train_images, train_labels = load_mnist('training',
                                            selection=slice(
                                                0, 500))  # doctest: +SKIP
    test_images, test_labels = load_mnist('testing', selection=slice(0, 500))
    random.seed(1)
    conditions = {"kernel": ["linear", "poly", "rbf"], "C": [0.1, 1, 10]}
    results = run(svm.SVC(gamma='auto'), "grid_search", conditions,
                  train_images, train_labels)

    results = sorted(results,
                     key=lambda result: list(result[0].values())[0],
                     reverse=True)
    print("results: ", results)

    # Build a table in which each row is a kernel value and each column is a slack value C; we know it's a 3x3 table
    for row in range(3):
        print_ls = []
        for cln in range(3):
            element = list(results.pop(0))
            for index in range(1, len(element)):
                element[index] = round_sig(element[index])
            print_ls.append(element)
        print(print_ls)
Code Example #10
def main():
    #aTargetStocks   = ["ETR:ADS", "ETR:ALV", "ETR:BAS", "ETR:BEI", "ETR:BMW", "ETR:CBK", "ETR:DAI", "ETR:DB1", "ETR:DBK", "ETR:DPW", "ETR:DTE", "ETR:EOAN", "ETR:FME", "ETR:FRE", "ETR:HEI", "ETR:HEN3", "ETR:IFX", "ETR:LHA", "ETR:LIN", "ETR:MEO", "ETR:MRK", "ETR:MUV2", "ETR:RWE", "ETR:SAP", "ETR:SDF", "ETR:SIE", "ETR:TKA", "ETR:VOW3"]
    #inputStream     = StockPriceFeatureStream(dataset = StockPriceFeatureStream.DAX30, label = StockPriceFeatureStream.FV, targetStock=aTargetStocks)    
    #input_stream_pert = InputPerturbatorUnivariateGaussian(input_stream, (0.0,1.0), numberOfNodes)
    #numberOfStocks  = 300
    
    numberOfNodes   = 4      
    anchorBatchSize = 1
    
    inputStream     = DriftStockPrices(nodes = numberOfNodes, driftProb = 0.001, numberOfStocks = None, normalize=True) #driftProb of 0.028 is about once every 8970 examples
    epsilon         = 0.1
    lossFunction    = EpsilonInsensitiveLoss(epsilon)    
    learningRate    = 0.000001
    lmbda           = 0.0001
    sigma           = 1.0
    
    linearUpdateRule= StochasticGradientDescent(lossFunction, lmbda, learningRate)#PassAggRegression()    
    linearModel     = LinearRegressionModel()
    
    epsilon         = 0.001
    
    kernel          = GaussianKernel(sigma)
    truncOp         = Projection(epsilon, kernel)# NoCompression()
    kernelModel           = KernelRegressionModel(kernel, truncOp)     
    kernelUpdateRule      = KernelStochasticGradientDescent(lossFunction, lmbda, learningRate)
    
    envs    = [
               #baselines
                PredictionEnvironment(   numberOfNodes   = numberOfNodes,
                                         updateRule      = linearUpdateRule,
                                         model           = linearModel,
                                         syncOperator    = NoSyncOperator()),
                PredictionEnvironment(   numberOfNodes   = numberOfNodes,
                                         updateRule      = kernelUpdateRule,
                                         model           = kernelModel,
                                         syncOperator    = NoSyncOperator()),
                PredictionEnvironment(   numberOfNodes   = numberOfNodes,
                                         updateRule      = linearUpdateRule,
                                         model           = linearModel,
                                         syncOperator    = CentralSyncOperator(),
                                         serial          = True),
                PredictionEnvironment(   numberOfNodes   = numberOfNodes,
                                         updateRule      = kernelUpdateRule,
                                         model           = kernelModel,
                                         syncOperator    = CentralSyncOperator(),
                                         serial          = True),
                #static averaging
                PredictionEnvironment(   numberOfNodes           = numberOfNodes,
                                         updateRule              = linearUpdateRule,
                                         model                   = linearModel,
                                         batchSizeInMacroRounds  = anchorBatchSize,
                                         syncOperator            = StaticSyncOperator()),
                PredictionEnvironment(   numberOfNodes           = numberOfNodes,
                                         updateRule              = linearUpdateRule,
                                         model                   = linearModel,
                                         batchSizeInMacroRounds  = 2 * anchorBatchSize,
                                         syncOperator            = StaticSyncOperator()),
                PredictionEnvironment(   numberOfNodes           = numberOfNodes,
                                         updateRule              = linearUpdateRule,
                                         model                   = linearModel,
                                         batchSizeInMacroRounds  = 4*anchorBatchSize,
                                         syncOperator            = StaticSyncOperator()),
                PredictionEnvironment(   numberOfNodes           = numberOfNodes,
                                         updateRule              = linearUpdateRule,
                                         model                   = linearModel,
                                         batchSizeInMacroRounds  = 8 * anchorBatchSize,
                                         syncOperator            = StaticSyncOperator()), 
                PredictionEnvironment(   numberOfNodes           = numberOfNodes,
                                         updateRule              = linearUpdateRule,
                                         model                   = linearModel,
                                         batchSizeInMacroRounds  = 16 * anchorBatchSize,
                                         syncOperator            = StaticSyncOperator()),
                PredictionEnvironment(   numberOfNodes           = numberOfNodes,
                                         updateRule              = linearUpdateRule,
                                         model                   = linearModel,
                                         batchSizeInMacroRounds  = 32 * anchorBatchSize,
                                         syncOperator            = StaticSyncOperator()),       
               
                PredictionEnvironment(   numberOfNodes           = numberOfNodes,
                                         updateRule              = kernelUpdateRule,
                                         model                   = kernelModel,
                                         batchSizeInMacroRounds  = anchorBatchSize,
                                         syncOperator            = StaticSyncOperator()),
                PredictionEnvironment(   numberOfNodes           = numberOfNodes,
                                         updateRule              = kernelUpdateRule,
                                         model                   = kernelModel,
                                         batchSizeInMacroRounds  = 2 * anchorBatchSize,
                                         syncOperator            = StaticSyncOperator()),
                PredictionEnvironment(   numberOfNodes           = numberOfNodes,
                                         updateRule              = kernelUpdateRule,
                                         model                   = kernelModel,
                                         batchSizeInMacroRounds  = 4*anchorBatchSize,
                                         syncOperator            = StaticSyncOperator()),
                PredictionEnvironment(   numberOfNodes           = numberOfNodes,
                                         updateRule              = kernelUpdateRule,
                                         model                   = kernelModel,
                                         batchSizeInMacroRounds  = 8 * anchorBatchSize,
                                         syncOperator            = StaticSyncOperator()), 
                PredictionEnvironment(   numberOfNodes           = numberOfNodes,
                                         updateRule              = kernelUpdateRule,
                                         model                   = kernelModel,
                                         batchSizeInMacroRounds  = 16 * anchorBatchSize,
                                         syncOperator            = StaticSyncOperator()),
                PredictionEnvironment(   numberOfNodes           = numberOfNodes,
                                         updateRule              = kernelUpdateRule,
                                         model                   = kernelModel,
                                         batchSizeInMacroRounds  = 32 * anchorBatchSize,
                                         syncOperator            = StaticSyncOperator()),    
                #dynamic averaging                              
                PredictionEnvironment(   numberOfNodes           = numberOfNodes,
                                         updateRule              = linearUpdateRule,
                                         model                   = linearModel,
                                         batchSizeInMacroRounds  = anchorBatchSize,
                                         syncOperator            = HedgedDistBaseSync(0.05),),
                PredictionEnvironment(   numberOfNodes           = numberOfNodes,
                                         updateRule              = linearUpdateRule,
                                         model                   = linearModel,
                                         batchSizeInMacroRounds  = anchorBatchSize,
                                         syncOperator            = HedgedDistBaseSync(0.08),),
                PredictionEnvironment(   numberOfNodes           = numberOfNodes,
                                         updateRule              = linearUpdateRule,
                                         model                   = linearModel,
                                         batchSizeInMacroRounds  = anchorBatchSize,
                                         syncOperator            = HedgedDistBaseSync(0.1),),
                PredictionEnvironment(   numberOfNodes           = numberOfNodes,
                                         updateRule              = linearUpdateRule,
                                         model                   = linearModel,
                                         batchSizeInMacroRounds  = anchorBatchSize,
                                         syncOperator            = HedgedDistBaseSync(0.3),),
                PredictionEnvironment(   numberOfNodes           = numberOfNodes,
                                         updateRule              = linearUpdateRule,
                                         model                   = linearModel,
                                         batchSizeInMacroRounds  = anchorBatchSize,
                                         syncOperator            = HedgedDistBaseSync(0.5),),
                PredictionEnvironment(   numberOfNodes           = numberOfNodes,
                                         updateRule              = linearUpdateRule,
                                         model                   = linearModel,
                                         batchSizeInMacroRounds  = anchorBatchSize,
                                         syncOperator            = HedgedDistBaseSync(1.0),),
               
                PredictionEnvironment(   numberOfNodes           = numberOfNodes,
                                         updateRule              = kernelUpdateRule,
                                         model                   = kernelModel,
                                         batchSizeInMacroRounds  = anchorBatchSize,
                                         syncOperator            = HedgedDistBaseSync(0.05),),
                PredictionEnvironment(   numberOfNodes           = numberOfNodes,
                                         updateRule              = kernelUpdateRule,
                                         model                   = kernelModel,
                                         batchSizeInMacroRounds  = anchorBatchSize,
                                         syncOperator            = HedgedDistBaseSync(0.08),),
                PredictionEnvironment(   numberOfNodes           = numberOfNodes,
                                         updateRule              = kernelUpdateRule,
                                         model                   = kernelModel,
                                         batchSizeInMacroRounds  = anchorBatchSize,
                                         syncOperator            = HedgedDistBaseSync(0.1),),
                PredictionEnvironment(   numberOfNodes           = numberOfNodes,
                                         updateRule              = kernelUpdateRule,
                                         model                   = kernelModel,
                                         batchSizeInMacroRounds  = anchorBatchSize,
                                         syncOperator            = HedgedDistBaseSync(0.3),),
                PredictionEnvironment(   numberOfNodes           = numberOfNodes,
                                         updateRule              = kernelUpdateRule,
                                         model                   = kernelModel,
                                         batchSizeInMacroRounds  = anchorBatchSize,
                                         syncOperator            = HedgedDistBaseSync(0.5),),
                PredictionEnvironment(   numberOfNodes           = numberOfNodes,
                                         updateRule              = kernelUpdateRule,
                                         model                   = kernelModel,
                                         batchSizeInMacroRounds  = anchorBatchSize,
                                         syncOperator            = HedgedDistBaseSync(1.0),),
               ]
    
    #experiment.runParameterEvaluation(inputStream, envs, numberOfNodes*100)
    experiment.run(inputStream, envs, MaxNumberOfExamplesCondition(numberOfNodes*500))
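The StaticSyncOperator and HedgedDistBaseSync environments above differ only in a single parameter each, so the same 28-element list can be built in a loop; a sketch using the constructors already used in this example (the list is grouped by model here, so the order differs from the hand-written version):

envs = []
for rule, mdl in [(linearUpdateRule, linearModel), (kernelUpdateRule, kernelModel)]:
    # baselines: no synchronization and serial central learning
    envs.append(PredictionEnvironment(numberOfNodes=numberOfNodes, updateRule=rule,
                                      model=mdl, syncOperator=NoSyncOperator()))
    envs.append(PredictionEnvironment(numberOfNodes=numberOfNodes, updateRule=rule,
                                      model=mdl, syncOperator=CentralSyncOperator(), serial=True))
    # static averaging with doubling macro-round batch sizes
    for k in (1, 2, 4, 8, 16, 32):
        envs.append(PredictionEnvironment(numberOfNodes=numberOfNodes, updateRule=rule,
                                          model=mdl, batchSizeInMacroRounds=k * anchorBatchSize,
                                          syncOperator=StaticSyncOperator()))
    # dynamic averaging with increasing divergence thresholds
    for delta in (0.05, 0.08, 0.1, 0.3, 0.5, 1.0):
        envs.append(PredictionEnvironment(numberOfNodes=numberOfNodes, updateRule=rule,
                                          model=mdl, batchSizeInMacroRounds=anchorBatchSize,
                                          syncOperator=HedgedDistBaseSync(delta)))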
Code Example #11
import argparse
import experiment

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    # NN
    parser.add_argument('--in_channels', type=int, default=3)
    parser.add_argument('--n_classes', type=int, default=10)
    parser.add_argument('--trained', type=str, default='outputs/garbage_')
    parser.add_argument('--slope', type=float, default=0.1)
    # train
    # parser.add_argument('--lr', type=float, default=2e-4)
    parser.add_argument('--lr', type=float, default=1e-3)
    # parser.add_argument('--weight_decay', type=float, default=2.5e-5)
    parser.add_argument('--epochs', type=int, default=100)
    # parser.add_argument('--epochs', type=int, default=512)
    parser.add_argument('--batch_size', type=int, default=128)
    # parser.add_argument('--betas', type=float, nargs='+', default=(.5, .999))
    # misc
    parser.add_argument('--device', type=str, default='cuda:0')
    parser.add_argument('--n_workers', type=int, default=0)
    parser.add_argument('--logdir', type=str, default='outputs/garbage1_')
    parser.add_argument('--message', '-m', type=str, default='')
    parser.add_argument('--mode',
                        type=str,
                        default='s2u',
                        choices=['m2mm', 's2u'])
    args, unknown = parser.parse_known_args()
    experiment.run(args)
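Because parse_known_args is used rather than parse_args, flags this script does not declare are collected instead of causing an error; a small standalone illustration of that behaviour:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--lr', type=float, default=1e-3)
args, unknown = parser.parse_known_args(['--lr', '2e-4', '--undeclared_flag', '1'])
print(args.lr)     # 0.0002
print(unknown)     # ['--undeclared_flag', '1']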
Code Example #12
File: video.py Project: compmem/psyqt
    # with Parallel():
    #     show(Text("Jubba",loc=(400,300)),duration=1005)
    #     show(Text("Jubba2",loc=(200,100)),duration=2010)
    #     with Serial():
    #         wait(1005)
    #         show(Text("Wubba",loc=(300,200)),duration=1005)
    #         show(Text("Wubba2",loc=(300,200)),duration=2010)
    #     with Serial():
    #         wait(2010)
    #         show(Text("Lubba",loc=(500,400)),duration=2010)

    for i in range(10):
        show(Text(str(i),loc=(400,300)),duration=205)

    # run the experiment
    run()






### Scratch

# class ShowState(ExpState):
#     def __init__(self, items, duration=None,
#                  update_screen=True, parent=None):
#         if not isinstance(items,list):
#             items = [items]
#         self.items = items
#         self.duration = duration
Code Example #13
File: run.py Project: SimonRamstedt/ddpg
      # self.env.render(mode='human')

      if FLAGS.random:
        action = self.env.action_space.sample()
      else:
        action = self.agent.act(test=test)

      observation, reward, term, info = self.env.step(action)
      term = (t >= FLAGS.tmax) or term

      r_f = self.env.filter_reward(reward) if FLAGS.autonorm else reward
      self.agent.observe(r_f,term,observation,test = test and not FLAGS.tot)

      if test:
        self.t_test += 1
      else:
        self.t_train += 1

      R += reward
      t += 1


    return R


def main():
  Experiment().run()

if __name__ == '__main__':
  experiment.run(main)
Code Example #14
                action = self.env.action_space.sample()
            else:
                action = self.agent.act(test=test)

            observation, reward, term, info = self.env.step(action)
            term = (t >= FLAGS.tmax) or term

            r_f = self.env.filter_reward(reward)
            self.agent.observe(r_f,
                               term,
                               observation,
                               test=test and not FLAGS.tot)

            if test:
                self.t_test += 1
            else:
                self.t_train += 1

            R += reward
            t += 1

        return R


def main():
    Experiment().run()


if __name__ == '__main__':
    experiment.run(main)
Code Example #15
    parser.add_argument('--model', type=str, default='convlstm1layer')
    parser.add_argument('--kernel_size', type=int, nargs='+', default=(5, 5))
    parser.add_argument('--stride', type=int, default=1)
    parser.add_argument('--hidden_dims', type=int, nargs='+', default=[16, ])
    parser.add_argument('--n_layers', type=int, default=1)
    parser.add_argument('--teacher_forcing_ratio', type=float, default=0.)
    parser.add_argument('--logit_output', action='store_true', default=False)
    # training
    parser.add_argument('--epochs', type=int, default=100)
    parser.add_argument('--batch_size', type=int, default=128)
    parser.add_argument('--device', type=str, default='cuda:0')
    parser.add_argument('--loss', type=str, default='loss/reduction')
    # optim
    parser.add_argument('--optim', type=str, default='adam')
    parser.add_argument('--lr', type=float, default=0.001)
    parser.add_argument('--betas', nargs='+', type=float, default=(0.9, 0.999))
    parser.add_argument('--weight_decay', type=float, default=0.)
    parser.add_argument('--rmsprop_alpha', type=float, default=0.99)
    parser.add_argument('--scheduler', type=str, default='')
    parser.add_argument('--milestones', nargs='+', type=int)
    parser.add_argument('--gamma', nargs='+', type=float)
    # misc
    parser.add_argument('--logdir', type=str, default='./logs')
    parser.add_argument('--expid', type=str, default='')
    parser.add_argument('--resume', type=str, default=None)
    parser.add_argument('--random_seed', type=int, default=42)
    parser.add_argument('--debug', action='store_true', default=False)

    args, _ = parser.parse_known_args()
    run(args)
Code Example #16
try:
    if args.vmstart == 'initial':
        # if configured, start all VMs needed for the whole experiment suite at the beginning
        azure_vm.start(n_client=max_n_client,
                       n_middleware=max_n_middleware,
                       n_server=max_n_server)

    for exp_id in tqdm(exp_ids, desc="suite"):
        # ensure necessary vm's for this experiment are started
        azure_vm.start(n_client=configs[exp_id]['n_client'],
                       n_middleware=configs[exp_id]['n_middleware'],
                       n_server=configs[exp_id]['n_server'])

        # run the experiment on the vm's
        experiment.run(experiment_suite_id=experiment_suite_id,
                       experiment_name=exp_id)

    # allows adding some additional experiments at the end while the simulation is already running
    with open(f"./configs/additional_names.json") as file:
        additional_names = json.load(file)

    for exp_id in tqdm(additional_names["exp_names"], desc="suite add"):
        # run the experiment on the vm's
        experiment.run(experiment_suite_id=experiment_suite_id,
                       experiment_name=exp_id)

finally:
    # Deallocate all VM's
    notify = Notify(config.NOTIFY_URL)
    notify.send("Experiment Suite Stopped")
Code Example #17
def main():
    """Setup / Start the experiment
    """
    args = get_args()
    experiment.run(args)
Code Example #18
def FIDAutocalibrateLarmor_standalone(
        init_gpa=False,  # Starts the gpa
        larmorFreq=3.075,  # Larmor frequency (MHz)
        rfExAmp=0.3,  # RF excitation pulse amplitude (a.u.)
        rfReAmp=0,  # RF refocusing pulse amplitude (a.u.)
        rfExPhase=0,  # Phase of the excitation pulse (degrees)
        rfExTime=35,  # RF excitation pulse time (us)
        rfReTime=0,  # RF refocusing pulse time (us)
        repetitionTime=500,  # Repetition time
        nReadout=160,  # Acquisition points
        acqTime=4,  # Acquisition time (ms)
        shimming=np.array([0, 0, 0]),  # Shimming along the X, Y and Z axes (a.u. *1e4)
        dummyPulses=0,  # Dummy pulses for T1 stabilization
        plotSeq=0):

    # Changing units
    repetitionTime = repetitionTime * 1e3
    acqTime = acqTime * 1e3
    shimming = shimming * 1e-4

    # Global variables
    addRdPoints = 10  # Extra points acquired to prevent bad first acquired points by RP

    # Conditions about RF
    if rfReAmp == 0:
        rfReAmp = 2 * rfExAmp
    if rfReTime == 0:
        rfReTime = rfExTime
    rfExPhase = rfExPhase * np.pi / 180
    rfExAmp = rfExAmp * np.exp(1j * rfExPhase)
    rfRePhase = np.pi / 2
    rfReAmp = rfReAmp * np.exp(1j * rfRePhase)

    # Matrix size
    nRD = nReadout + 2 * addRdPoints

    # SEQUENCE ################################################################
    # FID and calibration
    acqTimeReal = FIDsequence(larmorFreq, nRD, acqTime, rfExTime, rfExAmp,
                              repetitionTime, shimming, dummyPulses)

    #RP and data analysis
    if plotSeq == 1:
        ex.plot_sequence()
        plt.show()
        ex.__del__()
    elif plotSeq == 0:
        print('Running...')
        rxd, msgs = ex.run()
        ex.__del__()
        print('End')
        data = sig.decimate(rxd['rx0'] * 13.788,
                            conf.oversamplingFactor,
                            ftype='fir',
                            zero_phase=True)
        # data = data[addRdPoints:nReadout+addRdPoints]
        dataPlot(data, acqTimeReal, nRD, addRdPoints, 1)
        larmorFreqCal = dataAnalysis(larmorFreq, data, acqTimeReal, nRD,
                                     addRdPoints, 3)
        plt.show()
    # FID for Larmor frequency
    acqTimeReal = FIDsequence(larmorFreqCal, nRD, acqTime, rfExTime, rfExAmp,
                              repetitionTime, shimming, dummyPulses)
    #RP and plot echo
    if plotSeq == 1:
        ex.plot_sequence()
        plt.show()
        ex.__del__()
    elif plotSeq == 0:
        print('Running...')
        rxd, msgs = ex.run()
        ex.__del__()
        print('End')
        data = sig.decimate(rxd['rx0'] * 13.788,
                            conf.oversamplingFactor,
                            ftype='fir',
                            zero_phase=True)
        # data = data[addRdPoints:nReadout+addRdPoints]
        dataPlot(data, acqTimeReal, nRD, addRdPoints, 4)
        plt.show()
Code Example #19
from data_sources.linear_log_popularity_data_source import MetaTagExclusiveEarlyToLatePopularityDataSource
import experiment

experiment.run(MetaTagExclusiveEarlyToLatePopularityDataSource)
Code Example #20
from data_sources.linear_log_popularity_data_source import MetaTagAwareEarlyToLatePopularityDataSource
import experiment

experiment.run(MetaTagAwareEarlyToLatePopularityDataSource)
Code Example #21
File: video.py Project: paulhendricks/psyqt
    # with Parallel():
    #     show(Text("Jubba",loc=(400,300)),duration=1005)
    #     show(Text("Jubba2",loc=(200,100)),duration=2010)
    #     with Serial():
    #         wait(1005)
    #         show(Text("Wubba",loc=(300,200)),duration=1005)
    #         show(Text("Wubba2",loc=(300,200)),duration=2010)
    #     with Serial():
    #         wait(2010)
    #         show(Text("Lubba",loc=(500,400)),duration=2010)

    for i in range(10):
        show(Text(str(i), loc=(400, 300)), duration=205)

    # run the experiment
    run()

### Scratch

# class ShowState(ExpState):
#     def __init__(self, items, duration=None,
#                  update_screen=True, parent=None):
#         if not isinstance(items,list):
#             items = [items]
#         self.items = items
#         self.duration = duration
#         self.update_screen = update_screen
#         ExpState.__init__(self, parent=parent)

#     def onEntry(self, ev):
#         self._last_time = now()
Code Example #22
def main():
    np.random.seed(87655678)

    numberOfNodes = 5
    dimensionality = 2
    inputStream = inputs.xor.XORProblem(nodes=numberOfNodes,
                                        dim=dimensionality)
    print "Bayes Optimal Error: ",
    print inputStream.computeBayesOptimalError()
    lossFunction = SquaredLoss()
    learningRate = 0.1
    lmbda = 0.0001
    sigma = 4.5
    kernel = GaussianKernel(sigma)
    noModelComp = NoCompression()
    modelComp = Projection(0.1, kernel)
    linupdateRule = StochasticGradientDescent(lossFunction, lmbda,
                                              learningRate)
    updateRule = KernelStochasticGradientDescent(lossFunction, lmbda,
                                                 learningRate)
    anchorBatchSize = 20
    linmodel = LinearClassificationModel()
    model = KernelClassificationModel(kernel, noModelComp)
    modelComp = KernelClassificationModel(kernel, modelComp)
    dnnModel = DNNModel(indim=dimensionality, outdim=1)

    envs = [
        #baselines
        #                 PredictionEnvironment(   numberOfNodes   = numberOfNodes,
        #                                          updateRule      = linupdateRule,
        #                                          model           = dnnModel,
        #                                          batchSizeInMacroRounds  = 5000,
        #                                          syncOperator    = NoSyncOperator()),
        #                 PredictionEnvironment(   numberOfNodes   = numberOfNodes,
        #                                          updateRule      = linupdateRule,
        #                                          model           = dnnModel,
        #                                          syncOperator    = CentralSyncOperator(),
        #                                          serial          = True),
        #                 PredictionEnvironment(   numberOfNodes   = numberOfNodes,
        #                                          updateRule      = linupdateRule,
        #                                          model           = dnnModel,
        #                                          syncOperator    = CentralSyncOperator(),
        #                                          serial          = False),
        #                 #static averaging
        #                 PredictionEnvironment(   numberOfNodes           = numberOfNodes,
        #                                          updateRule              = linupdateRule,
        #                                          model                   = dnnModel,
        #                                          batchSizeInMacroRounds  = 5000,#50000 was good
        #                                          syncOperator            = StaticSyncOperator()),
        #                 #dynamic averaging
        PredictionEnvironment(
            numberOfNodes=numberOfNodes,
            updateRule=linupdateRule,
            model=dnnModel,
            batchSizeInMacroRounds=5000,
            syncOperator=HedgedDistBaseSync(1.0),
        ),
    ]

    #experiment.runParameterEvaluation(inputStream, envs, numberOfNodes*100)
    experiment.run(inputStream, envs, MaxNumberOfExamplesCondition(100))
Code Example #23
from mnist import load_mnist
import matplotlib.pyplot as plt
from experiment import run
from sklearn import svm

i, t = load_mnist()
"""
image = i[20]
imager = image.reshape((28,28))
plt.imshow(imager,cmap = "Greys")
plt.show()
"""
results = run(svm.SVC(gamma='auto'),
                 "grid_search",
                 {"kernel": ["linear", "poly", "rbf"], "degree": [3], "C": [0.1, 1, 10]},
                 i,
                 t)

print(results)
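The "grid_search" mode presumably evaluates every combination of the supplied condition values (3 kernels x 1 degree x 3 C values here, i.e. 9 runs); what that enumeration looks like, sketched independently of the experiment module:

from itertools import product

conditions = {"kernel": ["linear", "poly", "rbf"], "degree": [3], "C": [0.1, 1, 10]}
keys = list(conditions)
for values in product(*(conditions[k] for k in keys)):
    params = dict(zip(keys, values))
    print(params)  # e.g. {'kernel': 'linear', 'degree': 3, 'C': 0.1}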