Example #1
def main():

    parser = argparse.ArgumentParser(description='Backtest an algorithm.')
    # parser.add_argument("-a", "--algo", dest="algo", required=True, help="algorithm identifier")
    parser.add_argument("-l", "--log-level", dest="log_level", choices=['DEBUG', 'INFO', 'WARN'], default="INFO", help="logging level")
    parser.add_argument("-p", "--show-progress", dest="show_progress", action='store_true', help="log progress")

    global args
    args = parser.parse_args()

    level = getattr(logging, args.log_level)
    logging.basicConfig(level=level)

    config = Config("config.conf")

    data_provider = config.data_provider
    venue_connection = Broker(data_provider)

    orderbook_persist = BacktestOrderbookPersist()
    order_book = OrderBook(venue_connection, orderbook_persist)
    market_data = MarketData(venue_connection)

    containers = []
#    containers.append(Container(Algo(25, 10, 10), 10000, order_book, market_data))
    containers.append(Container(NakedBigShadow(7, 10, 50, MarketDataPeriod.HOUR_4), 10000, order_book, market_data))
    containers.append(Container(NakedBigShadow(7, 10, 50, MarketDataPeriod.DAY), 10000, order_book, market_data))
    containers.append(Container(NakedBigShadow(7, 10, 60, MarketDataPeriod.DAY), 10000, order_book, market_data))
    containers.append(Container(NakedBigShadow(7, 10, 70, MarketDataPeriod.DAY), 10000, order_book, market_data))
    containers.append(Container(NakedBigShadow(7, 10, 80, MarketDataPeriod.DAY), 10000, order_book, market_data))
    containers.append(Container(NakedBigShadow(7, 10, 90, MarketDataPeriod.DAY), 10000, order_book, market_data))
    containers.append(Container(NakedBigShadow(7, 10, 100, MarketDataPeriod.DAY), 10000, order_book, market_data))
    # containers.append(Container(Algo(15, 50, 100), 100000, order_book, market_data))

    if args.show_progress:
        progress_bar = ProgressBar(data_provider.expected_result_count, label='Backtest')
        data_provider.set_progress_callback(lambda x: progress_bar.set(x))
        venue_connection.start()
        progress_bar.complete()
    else:
        venue_connection.start()

    for container in containers:
        display_results(container)
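The calls above assume a small ProgressBar API: a constructor taking the expected total plus a label, a set() used as the data provider's progress callback, and complete() to finish the line. A minimal sketch under those assumptions (only the three members are taken from the example; the rendering is a guess):

import sys

class ProgressBar:
    def __init__(self, total, label=''):
        self.total = total
        self.label = label

    def set(self, value):
        # Redraw in place; used as the data provider's progress callback.
        pct = 100.0 * value / self.total if self.total else 100.0
        sys.stdout.write('\r{}: {:5.1f}%'.format(self.label, pct))
        sys.stdout.flush()

    def complete(self):
        self.set(self.total)
        sys.stdout.write('\n')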
Example #2
import os

import cv2
import lmdb

# mode, img_list and lmdb_save_path are defined earlier in the full script;
# ProgressBar comes from the project's util module.
print('Read images...')
# mode 1: small dataset, read all images up front
if mode == 1:
    dataset = [cv2.imread(v, cv2.IMREAD_UNCHANGED) for v in img_list]
    data_size = sum(img.nbytes for img in dataset)
# mode 2: large dataset, read images lazily during the write loop
elif mode == 2:
    data_size = sum(os.stat(v).st_size for v in img_list)
else:
    raise ValueError('mode should be 1 or 2')

env = lmdb.open(lmdb_save_path, map_size=data_size * 10)
print('Finish reading {} images.\nWrite lmdb...'.format(len(img_list)))

pbar = ProgressBar(len(img_list))
batch = 3000  # can be modified according to memory usage
txn = env.begin(write=True)  # txn is a Transaction object
for i, v in enumerate(img_list):
    pbar.update('Write {}'.format(v))
    base_name = os.path.splitext(os.path.basename(v))[0]
    key = base_name.encode('ascii')
    data = dataset[i] if mode == 1 else cv2.imread(v, cv2.IMREAD_UNCHANGED)
    if data.ndim == 2:
        H, W = data.shape
        C = 1
    else:
        H, W, C = data.shape
    meta_key = (base_name + '.meta').encode('ascii')
    meta = '{:d}, {:d}, {:d}'.format(H, W, C)
    # The encode is only essential in Python 3
    txn.put(key, data)
    txn.put(meta_key, meta.encode('ascii'))
    # commit in batches so memory stays bounded when reading lazily
    if mode == 2 and (i + 1) % batch == 0:
        txn.commit()
        txn = env.begin(write=True)
txn.commit()
env.close()
print('Finish writing lmdb.')
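For reference, a matching read-back sketch for the lmdb written above. The key '0001' is a hypothetical base name, and np.uint8 is an assumption that must match whatever cv2.imread produced:

import lmdb
import numpy as np

env = lmdb.open(lmdb_save_path, readonly=True)
with env.begin() as txn:
    meta = txn.get('0001.meta'.encode('ascii')).decode('ascii')
    H, W, C = [int(s) for s in meta.split(',')]
    buf = txn.get('0001'.encode('ascii'))
    img = np.frombuffer(buf, dtype=np.uint8).reshape(H, W, C)
env.close()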
Example #3
def __merge_colliders(self, colliders, project_to_merge_name,
                      colliders_to_merge, objects_xml_to_merge):
    pbar = ProgressBar(colliders_to_merge.items(),
                       title="MERGE THE COLLIDERS")
    self.__merge_objects(colliders, project_to_merge_name,
                         objects_xml_to_merge, pbar)
Example #4
def __merge_scene_objects(self, scene_objects, project_to_merge_name,
                          scene_objects_to_merge, objects_xml_to_merge):
    pbar = ProgressBar(scene_objects_to_merge.items(),
                       title="MERGE THE OBJECTS")
    self.__merge_objects(scene_objects, project_to_merge_name,
                         objects_xml_to_merge, pbar)
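Examples 3 and 4 (and 6 below) all wrap a dict's items in a ProgressBar that takes the iterable plus a display title, then hand the bar to __merge_objects to advance as it works. A minimal sketch of that assumed interface; everything beyond the constructor signature seen above is a guess:

class ProgressBar:
    def __init__(self, iterable, title=''):
        self.items = list(iterable)
        self.title = title
        self.done = 0

    def update(self, description=''):
        # Advance one item and redraw the line in place.
        self.done += 1
        print('\r{} {}/{} {}'.format(self.title, self.done,
                                     len(self.items), description),
              end='', flush=True)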
Example #5
def simulate(MARKET, CONFIG, strategy_name, portfolio, start_date, end_date, output="~/.quant/simulation.h5", strategy_params="{}"):
    """A simple simulator that simulates a strategy that only makes
    decisions at closing.  Only BUY and SELL orders are supported.  Orders
    are only good for the next day.

    A price type of MARKET is executed at the open price the next day.

    A price type of MARKET_ON_CLOSE is executed at the close price the next day.

    A price type of LIMIT will be executed at the LIMIT price the next day if LIMIT
    is between the low and high prices of the day.

    A price type of STOP will be executed at the STOP price the next day if STOP
    is between the low and high prices of the day.

    A price type of STOP_LIMIT will be executed at the LIMIT price the next day if STOP
    is between the low and high prices of the day.
    """

    outputFile = openOutputFile(output)
    # Get some of the tables from the output file
    order_tbl = outputFile.getNode("/Orders")
    position_tbl = outputFile.getNode("/Position")
    performance_tbl = outputFile.getNode("/Performance")
        
    start_date = datetime.datetime.strptime(start_date, "%Y-%m-%d")
    end_date = datetime.datetime.strptime(end_date, "%Y-%m-%d")
    # Start the simulation at closing of the previous trading day
    now = getPrevTradingDay(MARKET, start_date)
    try:
        position = initialize_position(CONFIG, portfolio, now)
        for instrument, p in position.items():
            if instrument != '$':
                quote = MARKET[instrument][now]
                if quote is None:
                    return
        # Pre-cache some info to make the simulation faster
        '''
        ticker = MARKET["399001.sz"].updateHistory(start_date, end_date)
        for symbol in position.keys():
            if symbol != '$':
                MARKET[symbol].updateHistory(start=start_date, end=end_date)
        '''
        days = (end_date - start_date).days
        
        # Initialize the strategy
        params = yaml.load(strategy_params)
        imp.acquire_lock()
        strategy_clazz = load_strategy(strategy_name)
        imp.release_lock()
        strategy = strategy_clazz(start_date, end_date, position, MARKET, params, outputFile)
        p = ProgressBar(maxValue=days, totalWidth=80)
        print "Starting Simulation %s" % portfolio
        # Write the initial position to the database
        write_position(MARKET, position_tbl, position, now)
        write_performance(MARKET,performance_tbl, position, now)
        while now <= end_date:
            #print now
            # Remember 'now' is after closing, so the strategy
            # can use any information from 'now' or earlier
            #orders = strategy.evaluate(now, position, MARKET)
            # Go to the next day to evaluate the orders
            while 1:
                orders = strategy.evaluate(now, position, MARKET)
                if orders == 'outdate':
                    outputFile.close()
                    return
                if orders is None:
                    now += ONE_DAY
                    p.performWork(1)
                else:
                    break
            # Execute orders
            execute_orders(MARKET, order_tbl, position, now, orders)
            write_position(MARKET, position_tbl, position, now)
            write_performance(MARKET, performance_tbl, position, now)
            now += ONE_DAY
            # Flush the data to disk
            outputFile.flush()
            p.performWork(1)
            #print p, '\r'
            
        p.updateAmount(p.max)
        #print p, '\r',
        #print '\r\n' # End the progress bar here before calling finalize
        orders = strategy.finalize()
    finally:
        outputFile.close()
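The docstring's fill rules amount to a small dispatch on price type against the next day's bar. A hedged sketch of that logic; the price-type strings and the order/bar attribute names are assumptions, not this module's actual API:

def fill_price(order, bar):
    # Return the execution price against the next day's OHLC bar,
    # or None if the order does not fill.
    if order.price_type == 'MARKET':
        return bar.open
    if order.price_type == 'MARKET_ON_CLOSE':
        return bar.close
    if order.price_type == 'LIMIT':
        return order.limit if bar.low <= order.limit <= bar.high else None
    if order.price_type == 'STOP':
        return order.stop if bar.low <= order.stop <= bar.high else None
    if order.price_type == 'STOP_LIMIT':
        return order.limit if bar.low <= order.stop <= bar.high else None
    raise ValueError('unknown price type: %s' % order.price_type)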
Example #6
def __merge_tiles(self, tiles, project_to_merge_name, tiles_to_merge,
                  objects_xml_to_merge):
    pbar = ProgressBar(tiles_to_merge.items(), title="MERGE THE TILES")
    self.__merge_objects(tiles, project_to_merge_name,
                         objects_xml_to_merge, pbar)
Example #7
    def train(self,
              batch_dim,
              epochs,
              steps,
              train_fetch_dict,
              train_feed_dict,
              eval_fetch_dict,
              eval_feed_dict,
              test_fetch_dict,
              test_feed_dict,
              _train_step=None,
              _eval_step=None,
              _test_step=None,
              _train_update=None,
              _eval_update=None,
              _test_update=None,
              eval_batch=None,
              test_batch=None,
              best_fn=None,
              min_epochs=None,
              look_ahead=None,
              save_every=None,
              directory=None,
              skip_first_eval=False):

        if _train_step is None:

            def _train_step(step, steps, epoch, epochs, min_epochs, model,
                            optimizer, batch_dim):
                return self.session.run(
                    train_fetch_dict(step, steps, epoch, epochs, min_epochs,
                                     model, optimizer),
                    feed_dict=train_feed_dict(step, steps, epoch, epochs,
                                              min_epochs, model, optimizer,
                                              batch_dim))

        if _eval_step is None:

            def _eval_step(epoch, epochs, min_epochs, model, optimizer,
                           batch_dim, eval_batch, start_time,
                           last_epoch_start_time, _eval_update):
                from_start = timedelta(seconds=int((time.time() - start_time)))
                last_epoch = timedelta(seconds=int((time.time() -
                                                    last_epoch_start_time)))
                eta = timedelta(
                    seconds=int((time.time() - start_time) * (epochs - epoch) /
                                epoch)) if (time.time() -
                                            start_time) > 1 else '-:--:-'
                self.log(
                    'Epochs {:10}/{} in {} (last epoch in {}), ETA: {}'.format(
                        epoch, epochs, from_start, last_epoch, eta))
                if eval_batch is not None:
                    pr = ProgressBar(80, eval_batch)
                    output = defaultdict(list)
                    for i in range(eval_batch):
                        for k, v in self.session.run(
                                eval_fetch_dict(epoch, epochs, min_epochs,
                                                model, optimizer),
                                feed_dict=eval_feed_dict(
                                    epoch, epochs, min_epochs, model,
                                    optimizer, batch_dim)).items():
                            output[k].append(v)
                        pr.update(i + 1)
                    self.log(date=False)
                    output = {k: np.mean(v) for k, v in output.items()}
                else:
                    output = self.session.run(
                        eval_fetch_dict(epoch, epochs, min_epochs, model,
                                        optimizer),
                        feed_dict=eval_feed_dict(epoch, epochs, min_epochs,
                                                 model, optimizer, batch_dim))

                if _eval_update is not None:
                    output.update(
                        _eval_update(epoch, epochs, min_epochs, model,
                                     optimizer, batch_dim, eval_batch))
                p = pprint.PrettyPrinter(indent=1, width=80)
                self.log('Validation --> {}'.format(p.pformat(output)))
                for k in output:
                    self.print[k].append(output[k])
                return output

        if _test_step is None:

            def _test_step(model, optimizer, batch_dim, test_batch, start_time,
                           _test_update):
                self.load(directory)
                from_start = timedelta(seconds=int((time.time() - start_time)))
                self.log('End of training ({} epochs) in {}'.format(
                    epochs, from_start))
                if test_batch is not None:
                    pr = ProgressBar(80, test_batch)
                    output = defaultdict(list)
                    for i in range(test_batch):
                        for k, v in self.session.run(test_fetch_dict(
                                model, optimizer),
                                                     feed_dict=test_feed_dict(
                                                         model, optimizer,
                                                         batch_dim)).items():
                            output[k].append(v)
                        pr.update(i + 1)
                    self.log(date=False)
                    output = {k: np.mean(v) for k, v in output.items()}
                else:
                    output = self.session.run(
                        test_fetch_dict(model, optimizer),
                        feed_dict=test_feed_dict(model, optimizer, batch_dim))
                if _test_update is not None:
                    output.update(
                        _test_update(model, optimizer, batch_dim, test_batch))
                p = pprint.PrettyPrinter(indent=1, width=80)
                self.log('Test --> {}'.format(p.pformat(output)))
                for k in output:
                    self.print['Test ' + k].append(output[k])
                return output

        best_model_value = None
        no_improvements = 0
        start_time = time.time()
        last_epoch_start_time = time.time()

        for epoch in range(epochs + 1):
            if not (skip_first_eval and epoch == 0):
                result = _eval_step(epoch, epochs, min_epochs, self.model,
                                    self.optimizer, batch_dim, eval_batch,
                                    start_time, last_epoch_start_time,
                                    _eval_update)
                if best_fn is not None and (
                        True if best_model_value is None else
                        best_fn(result) > best_model_value):
                    self.save(directory)
                    best_model_value = best_fn(result)
                    no_improvements = 0
                elif look_ahead is not None and no_improvements < look_ahead:
                    no_improvements += 1
                    self.load(directory)
                elif min_epochs is not None and epoch >= min_epochs:
                    self.log('No improvements after {} epochs!'.format(
                        no_improvements))
                    break
                if save_every is not None and epoch % save_every == 0:
                    self.save(directory)
            if epoch < epochs:
                last_epoch_start_time = time.time()
                pr = ProgressBar(80, steps)
                for step in range(steps):
                    _train_step(steps * epoch + step, steps, epoch, epochs,
                                min_epochs, self.model, self.optimizer,
                                batch_dim)
                    pr.update(step + 1)
                self.log(date=False)
        _test_step(self.model, self.optimizer, batch_dim, test_batch,
                   start_time, _test_update)
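This trainer builds its bar as ProgressBar(80, steps) and calls update(i) with the number of completed units, so the assumed interface is (width, total) plus update(done). A minimal sketch under those assumptions:

import sys

class ProgressBar:
    """Sketch of the (width, total) bar assumed by ProgressBar(80, steps)."""
    def __init__(self, width, total):
        self.width = width
        self.total = total

    def update(self, done):
        # Redraw the bar in place with `done` of `total` units finished.
        filled = int(self.width * done / self.total)
        sys.stdout.write('\r[' + '#' * filled +
                         '-' * (self.width - filled) + '] ' +
                         '{}/{}'.format(done, self.total))
        sys.stdout.flush()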
Example #8
def computeScores(bn_name,csv_name,visible=False,transforme_label=None):
    if isinstance(bn_name,str):
      bn=gum.loadBN(bn_name)
    else:
      bn=bn_name

    nbr_lines=lines_count(csv_name)-1

    csvfile = open(csv_name, "r")
    dialect = csv.Sniffer().sniff(csvfile.read(1024))
    csvfile.seek(0)

    batchReader = csv.reader(open(csv_name, 'r'), dialect)

    titre = next(batchReader)
    fields = {nom: i for i, nom in enumerate(titre)}

    positions=checkCompatibility(bn,fields,csv_name)
    if positions is None:
        sys.exit(1)

    inst=gum.Instantiation()
    bn.completeInstantiation(inst)

    if visible:
        prog = ProgressBar(csv_name+' : ',0, nbr_lines, 77,  mode='static', char='#')
        prog.display()

    nbr_insignificant=0
    num_ligne=0
    likelihood=0.0
    for data in batchReader:
        num_ligne+=1

        for i in range(inst.nbrDim()):
            try:
                inst.chgVal(i,getNumLabel(inst,i,data[positions[i]],transforme_label))
            except gum.OutOfBounds:
                print("out of bounds",i,positions[i],data[positions[i]],inst.variable(i))

        p=bn.jointProbability(inst)
        if p==0.0:
            print(str(num_ligne)+":"+str(inst))
            nbr_insignificant+=1
        else:
            likelihood+=math.log(p,2)
        if visible:
            prog.increment_amount()
            prog.display()

    if visible:
        print()

    nbr_arcs=1.0*bn.sizeArcs()
    dim=1.0*bn.dim()

    aic=likelihood-dim
    aicc=2*aic-2*dim*(dim+1)/(nbr_lines-dim+1) if (nbr_lines-dim+1>0) else "undefined"
    bic=likelihood-dim*math.log(nbr_lines,2)
    mdl=likelihood-nbr_arcs*math.log(nbr_lines,2)-32*dim  # 32 = number of bits per parameter

    return ((nbr_lines-nbr_insignificant)*100.0/nbr_lines,
            {'likelihood':likelihood,'aic':aic,'aicc':aicc,'bic':bic,'mdl':mdl})
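Spelled out, the closing block computes penalized log-likelihood scores, with all logarithms base 2, dim the number of free parameters of the network, nbr_arcs its arc count, and n = nbr_lines the number of data rows. The signs follow this code (higher is better), unlike the textbook AIC/BIC conventions that are minimized:

likelihood = sum over rows of log2 P(row)
AIC   = likelihood - dim
AICc  = 2*AIC - 2*dim*(dim + 1) / (n - dim + 1)
BIC   = likelihood - dim * log2(n)
MDL   = likelihood - nbr_arcs * log2(n) - 32 * dim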
Example #9
    def train(self,
              batch_dim,
              epochs,
              steps,
              train_fetch_dict,
              train_feed_dict,
              eval_fetch_dict,
              eval_feed_dict,
              test_fetch_dict,
              test_feed_dict,
              _train_step=None,
              _eval_step=None,
              _test_step=None,
              _train_update=None,
              _eval_update=None,
              _test_update=None,
              eval_batch=None,
              test_batch=None,
              best_fn=None,
              min_epochs=None,
              look_ahead=None,
              save_every=None,
              directory=None,
              skip_first_eval=False,
              skip_training=False):

        if not skip_training:

            if _train_step is None:

                def _train_step(step, steps, epoch, epochs, min_epochs, model,
                                optimizer, batch_dim):
                    model.is_training = True
                    if not model.latent_opt:
                        model.is_training = False
                    print(
                        f"_train_step, batch_dim: {batch_dim}, is_training: {model.is_training}"
                    )
                    embeddings = model.sample_z(batch_dim)
                    # embeddings = model.z
                    # print(f"embeddings assigned: {embeddings}")
                    assign_op = model.embeddings_LO.assign(embeddings)

                    # a, b, c, _ = self.session.run([train_fetch_dict(step, steps, epoch, epochs, min_epochs, model, optimizer), assign_op], feed_dict=train_feed_dict(step, steps, epoch, epochs, min_epochs, model, optimizer, batch_dim))
                    # a, _ = self.session.run([train_fetch_dict(step, steps, epoch, epochs, min_epochs, model, optimizer), assign_op], feed_dict=train_feed_dict(step, steps, epoch, epochs, min_epochs, model, optimizer, batch_dim))
                    if model.latent_opt:
                        z_up = self.session.run(optimizer.train_step_z,
                                                feed_dict=train_feed_dict(
                                                    step, steps, epoch, epochs,
                                                    min_epochs, model,
                                                    optimizer, batch_dim))
                    z_updated_val = self.session.run(model.embeddings_LO)
                    # print(f"embeddings updated: {z_updated_val}")

                    a = self.session.run(
                        train_fetch_dict(step, steps, epoch, epochs,
                                         min_epochs, model, optimizer),
                        feed_dict=train_feed_dict(step, steps, epoch, epochs,
                                                  min_epochs, model, optimizer,
                                                  batch_dim))

                    #print("!!!!!!!!!!!!!!!!!!updates", b)
                    #print("###################",c)
                    return a
                    # return self.session.run(train_fetch_dict(step, steps, epoch, epochs, min_epochs, model, optimizer), feed_dict=train_feed_dict(step, steps, epoch, epochs, min_epochs, model, optimizer, batch_dim))

            if _eval_step is None:

                def _eval_step(epoch, epochs, min_epochs, model, optimizer,
                               batch_dim, eval_batch, start_time,
                               last_epoch_start_time, _eval_update):
                    model.is_training = False
                    print(
                        f"_eval_step, batch_dim: {batch_dim}, is_training: {model.is_training}"
                    )

                    self.log(">>> 0 <<<")
                    from_start = timedelta(seconds=int((time.time() -
                                                        start_time)))
                    last_epoch = timedelta(
                        seconds=int((time.time() - last_epoch_start_time)))
                    eta = timedelta(
                        seconds=int((time.time() - start_time) *
                                    (epochs - epoch) /
                                    epoch)) if (time.time() -
                                                start_time) > 1 else '-:--:-'
                    self.log(">>> 1 <<<")

                    self.log(
                        'Epochs {:10}/{} in {} (last epoch in {}), ETA: {}'.
                        format(epoch, epochs, from_start, last_epoch, eta))

                    if eval_batch is not None:
                        self.log(">>> 1a <<<")
                        pr = ProgressBar(80, eval_batch)
                        output = defaultdict(list)

                        for i in range(eval_batch):
                            for k, v in self.session.run(
                                    eval_fetch_dict(epoch, epochs, min_epochs,
                                                    model, optimizer),
                                    feed_dict=eval_feed_dict(
                                        epoch, epochs, min_epochs, model,
                                        optimizer, batch_dim)).items():
                                output[k].append(v)
                            pr.update(i + 1)

                        self.log(date=False)
                        output = {k: np.mean(v) for k, v in output.items()}
                    else:
                        self.log(">>> 1b <<<")
                        # print(eval_fetch_dict(epoch, epochs, min_epochs, model, optimizer))
                        # print(eval_feed_dict(epoch, epochs, min_epochs, model, optimizer, batch_dim))
                        output = self.session.run(
                            eval_fetch_dict(epoch, epochs, min_epochs, model,
                                            optimizer),
                            feed_dict=eval_feed_dict(epoch, epochs, min_epochs,
                                                     model, optimizer,
                                                     batch_dim))
                        self.log(">>> 1b2 <<<")

                    self.log(">>> 2 <<<")

                    if _eval_update is not None:
                        output.update(
                            _eval_update(epoch, epochs, min_epochs, model,
                                         optimizer, batch_dim, eval_batch))

                    self.log(">>> 3 <<<")

                    p = pprint.PrettyPrinter(indent=1, width=80)
                    self.log('Validation --> {}'.format(p.pformat(output)))

                    for k in output:
                        self.print[k].append(output[k])

                    self.log(">>> 4 <<<")
                    return output

            # ========================================================================

            best_model_value = None
            no_improvements = 0
            start_time = time.time()
            last_epoch_start_time = time.time()

            for epoch in range(epochs + 1):
                print(f">>>>>>>>> epoch {epoch} <<<<<<<<<")

                early_stop = False

                if not (skip_first_eval and epoch == 0):

                    result = _eval_step(epoch, epochs, min_epochs, self.model,
                                        self.optimizer, batch_dim, eval_batch,
                                        start_time, last_epoch_start_time,
                                        _eval_update)

                    if best_fn is not None and (
                            True if best_model_value is None else
                            best_fn(result) > best_model_value):
                        self.save(directory)
                        best_model_value = best_fn(result)
                        no_improvements = 0
                    elif look_ahead is not None and no_improvements < look_ahead:
                        no_improvements += 1
                        self.load(directory)
                    elif min_epochs is not None and epoch >= min_epochs:
                        self.log('No improvements after {} epochs!'.format(
                            no_improvements))
                        break

                    if save_every is not None and epoch % save_every == 0:
                        self.save(directory, epoch)

                    print(f"result['valid score']: {result['valid score']}")
                    print(f"result['unique score']: {result['unique score']}")
                    print(f"result['novel score']: {result['novel score']}")

                    # if result['valid score'] > 85 and result['novel score'] > 85 and result['unique score'] > 15:
                    #     print("early stop!")
                    #     early_stop = True

                if epoch < epochs and (not early_stop):
                    last_epoch_start_time = time.time()
                    pr = ProgressBar(80, steps)
                    for step in range(steps):
                        _train_step(steps * epoch + step, steps, epoch, epochs,
                                    min_epochs, self.model, self.optimizer,
                                    batch_dim)
                        pr.update(step + 1)

                    self.log(date=False)

                if early_stop:
                    print(f">>>> early stop at {epoch}! <<<<")
                    break
            """
            self.model = GraphGANModel ...
            self.optimizer = GraphGANOptimizer ...
            batch_dim = batch_dim ...
            eval_batch =
            """
        else:
            start_time = time.time()

        if _test_step is None:

            def _test_step(model, optimizer, batch_dim, test_batch, start_time,
                           _test_update):
                model.is_training = False
                print(
                    f"_test_step, batch_dim: {batch_dim}, is_training: {model.is_training}"
                )
                self.load(directory, model.test_epoch)
                from_start = timedelta(seconds=int((time.time() - start_time)))
                self.log('End of training ({} epochs) in {}'.format(
                    epochs, from_start))

                if test_batch is not None:
                    pr = ProgressBar(80, test_batch)
                    output = defaultdict(list)

                    for i in range(test_batch):
                        for k, v in self.session.run(test_fetch_dict(
                                model, optimizer),
                                                     feed_dict=test_feed_dict(
                                                         model, optimizer,
                                                         batch_dim)).items():
                            output[k].append(v)
                        pr.update(i + 1)

                    self.log(date=False)
                    output = {k: np.mean(v) for k, v in output.items()}
                else:
                    output = self.session.run(
                        test_fetch_dict(model, optimizer),
                        feed_dict=test_feed_dict(model, optimizer, batch_dim))

                if _test_update is not None:
                    output.update(
                        _test_update(model, optimizer, batch_dim, test_batch))

                p = pprint.PrettyPrinter(indent=1, width=80)
                self.log('Test --> {}'.format(p.pformat(output)))

                # log_filename is assumed to be defined elsewhere (module scope)
                with open(log_filename, 'a') as f:
                    f.write('Test --> {}'.format(p.pformat(output)))

                for k in output:
                    self.print['Test ' + k].append(output[k])

                return output

        _test_step(self.model, self.optimizer, batch_dim, test_batch,
                   start_time, _test_update)
Example #10
def main():

    parser = argparse.ArgumentParser(description='Backtest an algorithm.')
    # parser.add_argument("-a", "--algo", dest="algo", required=True, help="algorithm identifier")
    parser.add_argument("-l",
                        "--log-level",
                        dest="log_level",
                        choices=['DEBUG', 'INFO', 'WARN'],
                        default="INFO",
                        help="logging level")
    parser.add_argument("-p",
                        "--show-progress",
                        dest="show_progress",
                        action='store_true',
                        help="log progress")

    global args
    args = parser.parse_args()

    level = getattr(logging, args.log_level)
    logging.basicConfig(level=level)

    config = Config("config.conf")

    data_provider = config.data_provider
    venue_connection = Broker(data_provider)

    orderbook_persist = BacktestOrderbookPersist()
    order_book = OrderBook(venue_connection, orderbook_persist)
    market_data = MarketData(venue_connection)

    containers = []
    #    containers.append(Container(Algo(25, 10, 10), 10000, order_book, market_data))
    containers.append(
        Container(NakedBigShadow(7, 10, 50, MarketDataPeriod.HOUR_4), 10000,
                  order_book, market_data))
    containers.append(
        Container(NakedBigShadow(7, 10, 50, MarketDataPeriod.DAY), 10000,
                  order_book, market_data))
    containers.append(
        Container(NakedBigShadow(7, 10, 60, MarketDataPeriod.DAY), 10000,
                  order_book, market_data))
    containers.append(
        Container(NakedBigShadow(7, 10, 70, MarketDataPeriod.DAY), 10000,
                  order_book, market_data))
    containers.append(
        Container(NakedBigShadow(7, 10, 80, MarketDataPeriod.DAY), 10000,
                  order_book, market_data))
    containers.append(
        Container(NakedBigShadow(7, 10, 90, MarketDataPeriod.DAY), 10000,
                  order_book, market_data))
    containers.append(
        Container(NakedBigShadow(7, 10, 100, MarketDataPeriod.DAY), 10000,
                  order_book, market_data))
    # containers.append(Container(Algo(15, 50, 100), 100000, order_book, market_data))

    if args.show_progress:
        progress_bar = ProgressBar(data_provider.expected_result_count,
                                   label='Backtest')
        data_provider.set_progress_callback(lambda x: progress_bar.set(x))
        venue_connection.start()
        progress_bar.complete()
    else:
        venue_connection.start()

    for container in containers:
        display_results(container)
Example #11
                def _eval_step(epoch, epochs, min_epochs, model, optimizer,
                               batch_dim, eval_batch, start_time,
                               last_epoch_start_time, _eval_update):
                    model.is_training = False
                    print(
                        f"_eval_step, batch_dim: {batch_dim}, is_training: {model.is_training}"
                    )

                    self.log(">>> 0 <<<")
                    from_start = timedelta(seconds=int((time.time() -
                                                        start_time)))
                    last_epoch = timedelta(
                        seconds=int((time.time() - last_epoch_start_time)))
                    eta = timedelta(
                        seconds=int((time.time() - start_time) *
                                    (epochs - epoch) /
                                    epoch)) if (time.time() -
                                                start_time) > 1 else '-:--:-'
                    self.log(">>> 1 <<<")

                    self.log(
                        'Epochs {:10}/{} in {} (last epoch in {}), ETA: {}'.
                        format(epoch, epochs, from_start, last_epoch, eta))

                    if eval_batch is not None:
                        self.log(">>> 1a <<<")
                        pr = ProgressBar(80, eval_batch)
                        output = defaultdict(list)

                        for i in range(eval_batch):
                            for k, v in self.session.run(
                                    eval_fetch_dict(epoch, epochs, min_epochs,
                                                    model, optimizer),
                                    feed_dict=eval_feed_dict(
                                        epoch, epochs, min_epochs, model,
                                        optimizer, batch_dim)).items():
                                output[k].append(v)
                            pr.update(i + 1)

                        self.log(date=False)
                        output = {k: np.mean(v) for k, v in output.items()}
                    else:
                        self.log(">>> 1b <<<")
                        # print(eval_fetch_dict(epoch, epochs, min_epochs, model, optimizer))
                        # print(eval_feed_dict(epoch, epochs, min_epochs, model, optimizer, batch_dim))
                        output = self.session.run(
                            eval_fetch_dict(epoch, epochs, min_epochs, model,
                                            optimizer),
                            feed_dict=eval_feed_dict(epoch, epochs, min_epochs,
                                                     model, optimizer,
                                                     batch_dim))
                        self.log(">>> 1b2 <<<")

                    self.log(">>> 2 <<<")

                    if _eval_update is not None:
                        output.update(
                            _eval_update(epoch, epochs, min_epochs, model,
                                         optimizer, batch_dim, eval_batch))

                    self.log(">>> 3 <<<")

                    p = pprint.PrettyPrinter(indent=1, width=80)
                    self.log('Validation --> {}'.format(p.pformat(output)))

                    for k in output:
                        self.print[k].append(output[k])

                    self.log(">>> 4 <<<")
                    return output
Example #12
def main():
    parser = argparse.ArgumentParser(
        'Extract sub images from Hyperspectral images.')
    parser.add_argument('--path1', type=str, default='E:/HSI/CAVE/CAVE/')
    parser.add_argument('--path2', type=str, default='E:/HSI/Harvard/Harvard/')
    parser.add_argument('--path3',
                        type=str,
                        default='E:/HSI/NTIRE2018/NTIRE2018_Train1_Spectral/')
    parser.add_argument('-o',
                        '--out-path',
                        type=str,
                        default='E:/HSI/HSI_MIX/')
    parser.add_argument('-t', '--threads', type=int, default=1)
    parser.add_argument('-c', '--crop-size', type=int, default=256)
    parser.add_argument('-s', '--stride', type=int, default=96)

    opt = parser.parse_args()
    in_path1 = opt.path1
    in_path2 = opt.path2
    in_path3 = opt.path3
    out_path = opt.out_path
    n_threads = opt.threads
    crop_sz = opt.crop_size
    stride = opt.stride
    thres_sz = 30
    compression_level = 3

    if not os.path.exists(out_path):
        os.makedirs(out_path)

    img_list = []
    for root, _, file_list in sorted(os.walk(in_path1)):
        path = [os.path.join(root, x) for x in file_list]
        img_list.extend(path)
    for root, _, file_list in sorted(os.walk(in_path2)):
        path = [os.path.join(root, x) for x in file_list]
        img_list.extend(path)
    for root, _, file_list in sorted(os.walk(in_path3)):
        path = [os.path.join(root, x) for x in file_list]
        img_list.extend(path)

    l = len(img_list)
    train_ids = list(set([random.randrange(l) for _ in range(l)]))
    train_ids.sort()
    val_ids = [idx for idx in range(l) if idx not in train_ids]
    val_ids.sort()
    train_files = [img_list[idx] for idx in train_ids]
    val_files = [img_list[idx] for idx in val_ids]
    sio.savemat(os.path.join(out_path, 'filenames.mat'), {
        'train_files': train_files,
        'val_files': val_files
    })

    def update(arg):
        pbar.update(arg)

    pbar = ProgressBar(len(train_files))

    pool = Pool(n_threads)
    for path in train_files:
        pool.apply_async(worker,
                         args=(path, os.path.join(out_path, 'train.h5'),
                               crop_sz, stride, thres_sz, compression_level),
                         callback=update)
    pool.close()
    pool.join()
    print("-----------Generation Finish-------------")

    val_path = os.path.join(out_path, 'val')
    if not os.path.exists(val_path):
        os.makedirs(val_path)
    for path in val_files:
        shutil.copy2(path, val_path)
    print("-----------Copy Val-files Finish-------------")
Example #13
def simulate(strategy_name,
             portfolio,
             start_date,
             end_date,
             output="~/.quant/simulation.h5",
             strategy_params="{}"):
    """A simple simulator that simulates a strategy that only makes
    decisions at closing.  Only BUY and SELL orders are supported.  Orders
    are only good for the next day.

    A price type of MARKET is executed at the open price the next day.

    A price type of MARKET_ON_CLOSE is executed at the close price the next day.

    A price type of LIMIT will be executed at the LIMIT price the next day if LIMIT
    is between the low and high prices of the day.

    A price type of STOP will be executed at the STOP price the next day if STOP
    is between the low and high prices of the day.

    A price type of STOP_LIMIT will be executed at the LIMIT price the next day if STOP
    is between the low and high prices of the day.
    """

    outputFile = openOutputFile(output)
    # Get some of the tables from the output file
    order_tbl = outputFile.getNode("/Orders")
    position_tbl = outputFile.getNode("/Position")
    performance_tbl = outputFile.getNode("/Performance")

    start_date = datetime.datetime.strptime(start_date, "%Y-%m-%d")
    end_date = datetime.datetime.strptime(end_date, "%Y-%m-%d")
    # Start the simulation at closing of the previous trading day
    now = getPrevTradingDay(start_date)

    try:
        position = initialize_position(portfolio, now)

        # Pre-cache some info to make the simulation faster
        ticker = MARKET["^DJI"].updateHistory(start_date, end_date)
        for symbol in position.keys():
            if symbol != '$':
                MARKET[symbol].updateHistory(start=start_date, end=end_date)
        days = (end_date - start_date).days

        # Initialize the strategy
        params = yaml.load(strategy_params)
        strategy_clazz = load_strategy(strategy_name)
        strategy = strategy_clazz(start_date, end_date, position, MARKET,
                                  params, outputFile)

        p = ProgressBar(maxValue=days, totalWidth=80)
        print "Starting Simulation"

        while now <= end_date:

            # Write the initial position to the database
            write_position(position_tbl, position, now)
            write_performance(performance_tbl, position, now)

            # Remember 'now' is after closing, so the strategy
            # can use any information from 'now' or earlier
            orders = strategy.evaluate(now, position, MARKET)

            # Go to the next day to evaluate the orders
            now += ONE_DAY
            while not isTradingDay(now):
                now += ONE_DAY
                p.performWork(1)

            # Execute orders
            execute_orders(order_tbl, position, now, orders)

            # Flush the data to disk
            outputFile.flush()
            p.performWork(1)
            print p, '\r',

        p.updateAmount(p.max)
        print p, '\r',
        print '\n'  # End the progress bar here before calling finalize
        orders = strategy.finalize()
    finally:
        outputFile.close()
Example #14
    def test(self, dataset_name="Test set", use_crf=True, iters_per_log=100, visualize=False, use_prior=True):
        self.model.eval()
        test_loss = 0
        class_correct = [0] * self.num_classes
        class_jaccard_or = [0] * self.num_classes
        loss_func = nn.CrossEntropyLoss()
        batches_done = 0
        progress_bar = ProgressBar("Test", len(self.train_loader), self.train_loader.batch_size)
        with torch.no_grad():            
            # calculate an UNBIASED prior
            prior = self.data_statistics.get_distribution().to(self.device)
            for i in range(self.num_classes):
                prior[i] = prior[i] / (torch.mean(prior[i]))  #  scales relative probs to have mean of 1
            normalization = torch.sum(prior, dim = 0)  # sum along classes
            prior /= normalization
            prior = torch.ones(prior.shape).to(self.device) - prior

            for batch_idx, (raw_samples, data, target) in tqdm(enumerate(self.test_loader)):  # runs through trainer
                data, target = data.to(self.device), target.to(self.device)
                #progress_bar.make_progress()
                output = self.model(data)
                if use_prior:
                    output = torch.exp(output)  # log-probabilities -> probabilities
                    for i in range(len(output)):  # could be multiple images in output batch
                        output[i] = output[i] - prior
                        output[i] = torch.sigmoid(output[i])
                        normalization = torch.sum(output[i], dim=0)
                        output[i] /= normalization
                    output = torch.log(output)  # back to log space for the loss

                if use_crf:
                    output = crf_batch_postprocessing(raw_samples, output, self.num_classes)

                output = output.to(self.device)
                test_loss += loss_func(output, target).item()

                # convert into a 1-channel image with predicted class values
                pred = torch.argmax(output, dim=1, keepdim=False)
                #assert(pred.shape == (self.test_loader.batch_size, 1280, 720)), "got incorrect shape of: " + str(pred.shape)

                # record pixel measurements
                for i in range(self.num_classes):
                    correct_pixels = torch.where(pred.byte().eq(torch.ones(pred.shape, dtype = torch.uint8).to(self.device)*i)
                                                 & target.view_as(pred).byte().eq(torch.ones(pred.shape, dtype = torch.uint8).to(self.device)*i),
                                                 torch.ones(pred.shape, dtype=torch.uint8).to(self.device),
                                                 torch.zeros(pred.shape, dtype=torch.uint8).to(self.device)).sum().item()
                    class_correct[i] += correct_pixels
                    jaccard_or_pixels = torch.where(pred.byte().eq(torch.ones(pred.shape, dtype = torch.uint8).to(self.device)*i)
                                                 | target.view_as(pred).byte().eq(torch.ones(pred.shape, dtype = torch.uint8).to(self.device)*i),
                                                 torch.ones(pred.shape, dtype=torch.uint8).to(self.device),
                                                 torch.zeros(pred.shape, dtype=torch.uint8).to(self.device)).sum().item()
                    class_jaccard_or[i] += jaccard_or_pixels

                get_per_class_accuracy(pred, target, self.model.test_stats.confusion)
                batches_done += 1

                if batches_done % self.log_spacing == 0:
                    self.model.test_stats.per_class_accuracy.append(np.diagonal(self.model.test_stats.confusion).copy())
                    self.print_log(class_correct, class_jaccard_or, test_loss, batches_done, self.test_loader.batch_size, dataset_name, True, self.model.test_stats.confusion, test=True)
                    print("saving model to {}".format(self.model.save_dir))
                    self.model.save()

                    if visualize:
                        visualize_output(pred, target, raw_samples)
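The torch.where chains above count per-class intersection (correct) and union (Jaccard denominator) pixels. An equivalent, more compact formulation, as a sketch rather than a drop-in method of this class:

import torch

def per_class_iou_counts(pred, target, num_classes):
    """Return per-class (intersection, union) pixel counts for one batch."""
    inter, union = [], []
    for c in range(num_classes):
        p = pred.eq(c)
        t = target.view_as(pred).eq(c)
        inter.append((p & t).sum().item())
        union.append((p | t).sum().item())
    return inter, union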
Example #15
def main():
    parser = argparse.ArgumentParser(
        description="PyTorch Object Detection2 Inference")
    parser.add_argument(
        "--config-file",
        default="configs/bua-caffe/extract-bua-caffe-r101.yaml",
        metavar="FILE",
        help="path to config file",
    )

    parser.add_argument('--num-cpus',
                        default=1,
                        type=int,
                        help='number of cpus to use for ray, 0 means no limit')

    parser.add_argument('--gpus',
                        dest='gpu_id',
                        help='GPU id(s) to use',
                        default='0',
                        type=str)

    parser.add_argument("--mode",
                        default="caffe",
                        type=str,
                        help="bua_caffe, ...")

    parser.add_argument(
        '--extract-mode',
        default='roi_feats',
        type=str,
        help="'roi_feats', 'bboxes' and 'bbox_feats' indicate "
             "'extract roi features directly', 'extract bboxes only' and "
             "'extract roi features with pre-computed bboxes' respectively"
    )

    parser.add_argument('--min-max-boxes',
                        default='min_max_default',
                        type=str,
                        help='the number of min-max boxes of extractor')

    parser.add_argument('--out-dir',
                        dest='output_dir',
                        help='output directory for features',
                        default="features")
    parser.add_argument('--image-dir',
                        dest='image_dir',
                        help='directory with images',
                        default="image")
    parser.add_argument('--bbox-dir',
                        dest='bbox_dir',
                        help='directory with bbox',
                        default="bbox")
    parser.add_argument('--grid_size', default=8, type=int)
    parser.add_argument(
        "--resume",
        action="store_true",
        help="whether to attempt to resume from the checkpoint directory",
    )
    parser.add_argument(
        "opts",
        help="Modify config options using the command-line",
        default=None,
        nargs=argparse.REMAINDER,
    )

    args = parser.parse_args()

    cfg = setup(args)

    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_id
    num_gpus = len(args.gpu_id.split(','))

    MIN_BOXES = cfg.MODEL.BUA.EXTRACTOR.MIN_BOXES
    MAX_BOXES = cfg.MODEL.BUA.EXTRACTOR.MAX_BOXES
    CONF_THRESH = cfg.MODEL.BUA.EXTRACTOR.CONF_THRESH

    # Extract features.
    # imglist = os.listdir(args.image_dir)
    imglist = [
        os.path.join(dp, f) for dp, dn, fn in os.walk(args.image_dir)
        for f in fn
    ]
    imglist = [i.replace(f'{args.image_dir}/', '') for i in imglist]

    num_images = len(imglist)
    print('Number of images: {}.'.format(num_images))
    if args.num_cpus != 0:
        ray.init(num_cpus=args.num_cpus)
    else:
        ray.init()
    img_lists = [imglist[i::num_gpus] for i in range(num_gpus)]

    pb = ProgressBar(len(imglist))
    actor = pb.actor

    print('Number of GPUs: {}.'.format(num_gpus))
    extract_feat_list = []
    for i in range(num_gpus):
        extract_feat_list.append(
            extract_feat.remote(i, img_lists[i], cfg, args, actor))

    pb.print_until_done()
    ray.get(extract_feat_list)
    ray.get(actor.get_counter.remote())
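The pb.actor / print_until_done / get_counter.remote() calls suggest the common Ray pattern where remote tasks report progress to a counter actor that the driver polls. A sketch of such a pair under that assumption (not the actual utility this repo ships):

import time

import ray

@ray.remote
class ProgressBarActor:
    """Counter that remote extract_feat workers update from their processes."""
    def __init__(self):
        self.count = 0

    def update(self, n=1):
        self.count += n

    def get_counter(self):
        return self.count

class ProgressBar:
    def __init__(self, total):
        self.total = total
        self.actor = ProgressBarActor.remote()

    def print_until_done(self):
        # Poll the actor until every image has been reported.
        done = 0
        while done < self.total:
            done = ray.get(self.actor.get_counter.remote())
            print('\r{}/{} images'.format(done, self.total), end='', flush=True)
            time.sleep(1)
        print()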
Example #16
    def train(self, epoch, start_index=0):
        """
        Args:
            model (nn.Module): the FCN pytorch model
            device (torch.device): represents if we are running this on GPU or CPU
            optimizer (torch.optim): the optimization object that trains the network. Ex: torch.optim.Adam(model.parameters())
            train_loader (torch.utils.data.DataLoader): the pytorch object that contains all training data and targets
            epoch (int): the epoch number we are on
            log_spacing (int): prints training statistics to display every <log_spacing> batches
            save_spacing (int): saves most recent version of model every <save_spacing> batches
            per_class (boolean): true if want class-level statistics printed. false otherwise
        """
        progress_bar = ProgressBar("Train", len(self.train_loader), self.train_loader.batch_size)
        self.model.train()  # puts it in training mode
        class_correct = [0] * self.num_classes
        class_jaccard_or = [0] * self.num_classes
        sum_loss = 0
        num_batches_since_log = 0
        loss_func = nn.CrossEntropyLoss(reduction = "none")
        # run through data in batches, train network on each batch
        for batch_idx, (_, data, target) in tqdm(enumerate(self.train_loader)):
            #progress_bar.make_progress()
            if batch_idx < start_index: continue
            loss_vec = torch.zeros((self.num_classes), dtype = torch.float32)
            data, target = data.to(self.device), target.to(self.device)
            self.optimizer.zero_grad()  # reset gradient to 0 (so doesn't accumulate)
            output = self.model(data)  # runs batch through the model
            loss = loss_func(output, target)  # compute loss of output

            # convert into 1 channel image with predicted class values 
            pred = torch.argmax(output, dim = 1, keepdim = False)
            #assert(pred.shape == (self.train_loader.batch_size, 1280, 720)), "got incorrect shape of: " + str(pred.shape)

            # record pixel measurements
            for i in range(self.num_classes):
                correct_pixels = torch.where(
                    pred.byte().eq(torch.ones(pred.shape, dtype = torch.uint8).to(self.device) * i)
                    & target.view_as(pred).byte().eq(torch.ones(pred.shape, dtype = torch.uint8).to(self.device) * i),
                    torch.ones(pred.shape, dtype = torch.uint8).to(self.device),
                    torch.zeros(pred.shape, dtype = torch.uint8).to(self.device)).sum().item()
                class_correct[i] += correct_pixels
                jaccard_or_pixels = torch.where(
                    pred.byte().eq(torch.ones(pred.shape, dtype = torch.uint8).to(self.device) * i)
                    | target.view_as(pred).byte().eq(torch.ones(pred.shape, dtype = torch.uint8).to(self.device) * i),
                    torch.ones(pred.shape, dtype = torch.uint8).to(self.device),
                    torch.zeros(pred.shape, dtype = torch.uint8).to(self.device)).sum().item()
                class_jaccard_or[i] += jaccard_or_pixels

            get_per_class_loss(loss, target, loss_vec)
            loss = torch.sum(loss_vec)
            self.model.train_stats.per_class_loss.append(loss_vec)

            sum_loss += loss.item()
            loss.backward()  # take loss object and calculate gradient; updates optimizer
            self.optimizer.step()  # update model parameters with loss gradient

            #update per-class accuracies
            get_per_class_accuracy(pred, target, self.model.train_stats.confusion)

            if batch_idx % self.log_spacing == 0:
                self.model.train_stats.per_class_accuracy.append(np.diagonal(self.model.train_stats.confusion).copy())
                print("Loss Vec: {}".format(loss_vec))
                self.print_log(class_correct, class_jaccard_or, sum_loss, batch_idx + 1, self.train_loader.batch_size,
                          "Training Set", self.per_class, self.model.train_stats.confusion)

            if batch_idx % self.save_spacing == 0:
                print('Saving Model to: ' + str(self.model.save_dir))
                self.model.save()
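get_per_class_loss is called with the unreduced CrossEntropyLoss output and a per-class accumulator whose sum is later backpropagated, so it must keep the sums as tensors. A hedged sketch consistent with those call sites; the actual helper is not shown in this example:

import torch

def get_per_class_loss(loss, target, loss_vec):
    # Fold the per-pixel loss into per-class sums, keeping tensors so the
    # caller's loss.backward() still sees the autograd graph.
    for c in range(loss_vec.shape[0]):
        mask = target.eq(c)
        if mask.any():
            loss_vec[c] = loss_vec[c] + loss[mask].sum()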
Example #17
def main():
    """A multi-thread tool to crop sub-images."""
    # input_folder = '/mnt/SSD/xtwang/BasicSR_datasets/DIV2K800/DIV2K800'
    # save_folder = '/mnt/SSD/xtwang/BasicSR_datasets/DIV2K800/DIV2K800_sub'
    input_sources = {
        'pristine': '/data/deva/pristine_dataset',
        # 'OST': '/data/deva/OutdoorSceneTrain_v2',
        #'Flickr2k_HR': '/data/deva/Flickr2K/Flickr2K_HR',
    }
    save_folder = Path('/home/deva/pristine_dataset_train_HR_crop_128')

    n_thread = 20
    crop_sz = 128
    step = 64
    thres_sz = 48
    compression_level = 0  # 3 is the default value in cv2
    # CV_IMWRITE_PNG_COMPRESSION from 0 to 9. A higher value means a smaller size and longer
    # compression time. If read raw images during training, use 0 for faster IO speed.

    image_list = []
    for input_src in input_sources:
        print(f"[INFO] {input_src}")
        dir_path = Path(input_sources[input_src])
        if not dir_path.is_dir():
            print(f"[ERROR] {dir_path} does not exists")
            continue
        for ext in ALLOWED_EXT:
            path_list = list(dir_path.glob(f"**/*{ext}"))
            print(
                f"[INFO] found {len(path_list)} files in {dir_path} with {ext} extension"
            )
            for image_path in path_list:
                image_list.append((input_src, image_path))

    # image_list = image_list[:10]
    print(f"[INFO] initiating processing of {len(image_list)} images")

    save_folder.mkdir(exist_ok=True, parents=True)

    def update(arg):
        pbar.update(arg)

    '''
    pbar = ProgressBar(len(img_list))

    pool = Pool(n_thread)
    for path in img_list:
        pool.apply_async(worker,
            args=(path, save_folder, crop_sz, step, thres_sz, compression_level),
            callback=update)
    pool.close()
    pool.join()
    print('All subprocesses done.')
    '''
    pbar = ProgressBar(len(image_list))

    # Process synchronously here; the multiprocessing variant is kept above.
    for img_src in image_list:
        A = worker(img_src, save_folder, crop_sz, step, thres_sz,
                   compression_level)
        update(A)
    print('All images done.')
Example #18
def simulate(strategy_name, portfolio, start_date, end_date, output="~/.quant/simulation.h5", strategy_params="{}"):
    """A simple simulator that simulates a strategy that only makes
    decisions at closing.  Only BUY and SELL orders are supported.  Orders
    are only good for the next day.

    A price type of MARKET is executed at the open price the next day.

    A price type of MARKET_ON_CLOSE is executed at the close price the next day.

    A price type of LIMIT will be executed at the LIMIT price the next day if LIMIT
    is between the low and high prices of the day.

    A price type of STOP will be executed at the STOP price the next day if STOP
    is between the low and high prices of the day.

    A price type of STOP_LIMIT will be executed at the LIMIT price the next day if STOP
    is between the low and high prices of the day.
    """

    outputFile = openOutputFile(output)
    # Get some of the tables from the output file
    order_tbl = outputFile.getNode("/Orders")
    position_tbl = outputFile.getNode("/Position")
    performance_tbl = outputFile.getNode("/Performance")
        
    start_date = datetime.datetime.strptime(start_date, "%Y-%m-%d")
    end_date = datetime.datetime.strptime(end_date, "%Y-%m-%d")
    # Start the simulation at closing of the previous trading day
    now = getPrevTradingDay(start_date)

    try:
        position = initialize_position(portfolio, now)

        # Pre-cache some info to make the simulation faster
        ticker = MARKET["^DJI"].updateHistory(start_date, end_date)
        for symbol in position.keys():
            if symbol != '$':
                MARKET[symbol].updateHistory(start=start_date, end=end_date)
        days = (end_date - start_date).days
        
        # Initialize the strategy
        params = yaml.safe_load(strategy_params)
        strategy_clazz = load_strategy(strategy_name)
        strategy = strategy_clazz(start_date, end_date, position, MARKET, params, outputFile)

        p = ProgressBar(maxValue=days, totalWidth=80)
        print "Starting Simulation"

        while now <= end_date:

            # Write the initial position to the database
            write_position(position_tbl, position, now)
            write_performance(performance_tbl, position, now)
            
            # Remember 'now' is after closing, so the strategy
            # can use any information from 'now' or earlier
            orders = strategy.evaluate(now, position, MARKET)
               
            # Go to the next day to evaluate the orders
            now += ONE_DAY
            while not isTradingDay(now):
                now += ONE_DAY
                p.performWork(1)
            
            # Execute orders
            execute_orders(order_tbl, position, now, orders)

            # Flush the data to disk
            outputFile.flush()
            p.performWork(1)
            print p, '\r',

        p.updateAmount(p.max)
        print p, '\r',
        print '\n' # End the progress bar here before calling finalize
        orders = strategy.finalize()
    finally:
        outputFile.close()
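
execute_orders itself is not shown in this snippet; the sketch below captures only the fill rules the docstring describes, with assumed Order attribute names (price_type, limit, stop) and a plain dict for the next day's OHLC bar.

def fill_price(order, bar):
    """Return the execution price for `order` on the next trading day's
    `bar` ({'open', 'high', 'low', 'close'}), or None if it does not fill."""
    if order.price_type == 'MARKET':
        return bar['open']                     # next day's open
    if order.price_type == 'MARKET_ON_CLOSE':
        return bar['close']                    # next day's close
    if order.price_type == 'LIMIT':
        # fills only if the day's range touched the limit price
        return order.limit if bar['low'] <= order.limit <= bar['high'] else None
    if order.price_type == 'STOP':
        return order.stop if bar['low'] <= order.stop <= bar['high'] else None
    if order.price_type == 'STOP_LIMIT':
        # triggered by the stop price, executed at the limit price
        return order.limit if bar['low'] <= order.stop <= bar['high'] else None
    raise ValueError('unknown price type: %s' % order.price_type)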
Ejemplo n.º 19
0
def objective(arguments):
    """
    Main pipeline for training and cross-validation. TODO: testing will be done separately in test.py.
    """
    """ Setup result directory and enable logging to file in it """
    outdir = make_results_dir(arguments)
    logger.init(outdir, logging.INFO)
    logger.info('Arguments:\n{}'.format(pformat(arguments)))
    """ Initialize Tensorboard """
    tensorboard_writer = initialize_tensorboard(outdir)
    """ Set random seed throughout python, pytorch and numpy """
    logger.info('Using random seed: %d' % arguments['random_seed'])
    torch.manual_seed(
        arguments['random_seed'])  # Set for pytorch, used for cuda as well.
    random.seed(arguments['random_seed'])  # Set for python
    np.random.seed(arguments['random_seed'])  # Set for numpy
    """ Set device - cpu or gpu """
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    logger.info(f'Using device - {device}')
    """ Load Model with weights(if available) """
    model: torch.nn.Module = get_model(arguments.get('model_args')).to(device)
    """ Create loss function """
    criterion = create_loss(arguments['loss_args'])
    """ Create optimizer """
    optimizer = create_optimizer(model.parameters(),
                                 arguments['optimizer_args'])
    """ Load parameters for the Dataset """
    dataset: BaseDataset = create_dataset(arguments['dataset_args'],
                                          arguments['train_data_args'],
                                          arguments['val_data_args'])
    """ Generate all callbacks """
    callbacks: List[Callbacks] = generate_callbacks(arguments, dataset, device,
                                                    outdir)
    """ Debug the inputs to model and save graph to tensorboard """
    dataset.debug()
    dummy_input = (torch.rand(
        1,
        arguments['dataset_args']['name'].value['channels'],
        *arguments['dataset_args']['name'].value['image_size'],
    )).to(device)
    tensorboard_writer.save_graph(model, dummy_input)
    """ Pipeline - loop over the dataset multiple times """
    max_validation_accuracy = 0
    itr = 0

    best_model_path = None
    delete_old_models = True

    run_callbacks(callbacks,
                  model=model,
                  optimizer=optimizer,
                  mode=CallbackMode.ON_TRAIN_BEGIN)
    for epoch in range(arguments['nb_epochs']):
        """ Train the model """
        train_data_args = arguments['train_data_args']
        if train_data_args['to_train']:
            train_dataloader = dataset.train_dataloader
            progress_bar = ProgressBar(
                target=len(train_dataloader),
                clear=True,
                description=f"Training {epoch + 1}/{arguments['nb_epochs']}: ")
            loss_running_average = RunningAverage()

            run_callbacks(callbacks,
                          model=model,
                          optimizer=optimizer,
                          mode=CallbackMode.ON_EPOCH_BEGIN,
                          epoch=epoch)
            model.train()
            for i, data in enumerate(train_dataloader, 0):
                # get the inputs
                inputs, labels = data
                inputs = inputs.to(device)
                labels = labels.to(device)

                # zero the parameter gradients
                optimizer.zero_grad()

                # Forward Pass
                outputs = model(inputs)

                classification_loss = criterion(outputs, labels)
                tensorboard_writer.save_scalar('Classification_Loss',
                                               classification_loss.item(), itr)
                classification_loss.backward()
                optimizer.step()

                # Compute running loss. Not exact but efficient.
                running_loss = loss_running_average.add_new_sample(
                    classification_loss.item())
                progress_bar.update(i + 1, [
                    ('current loss', classification_loss.item()),
                    ('running loss', running_loss),
                ])
                tensorboard_writer.save_scalar('Training_Loss',
                                               classification_loss.item(), itr)
                itr += 1

            # Callbacks ON_EPOCH_END should be run only when training is enabled. Thus call here.
            run_callbacks(callbacks,
                          model=model,
                          optimizer=optimizer,
                          mode=CallbackMode.ON_EPOCH_END,
                          epoch=epoch)
        """ Validate the model """
        val_data_args = arguments['val_data_args']
        if val_data_args['validate_step_size'] > 0 and \
                epoch % val_data_args['validate_step_size'] == 0:
            correct, total = 0, 0
            validation_dataloader = dataset.validation_dataloader
            progress_bar = ProgressBar(
                target=len(validation_dataloader),
                clear=True,
                description=f"Validating {epoch + 1}/{arguments['nb_epochs']}: "
            )
            model.eval()
            with torch.no_grad():
                for i, data in enumerate(validation_dataloader, 0):
                    inputs, labels = data
                    inputs = inputs.to(device)
                    labels = labels.to(device)

                    outputs = model(inputs)
                    _, predicted = torch.max(outputs.data, 1)
                    total += labels.size(0)
                    correct += (predicted == labels).sum().item()

                    progress_bar.update(i + 1, [
                        ('Batch Accuracy', 100 * correct / total),
                    ])

            val_accuracy = 100 * correct / total
            tensorboard_writer.save_scalar('Validation_Accuracy', val_accuracy,
                                           itr)
            logger.info(
                f'Accuracy of the network on the {dataset.get_val_dataset_size} validation images: {val_accuracy} %'
            )
            """ Save Model """
            if val_accuracy > max_validation_accuracy:
                if delete_old_models and best_model_path:
                    delete_old_file(best_model_path)
                best_model_path = os.path.join(
                    outdir,
                    f'epoch_{epoch:04}-model-val_accuracy_{val_accuracy}.pth')
                torch.save(model.state_dict(), best_model_path)
                max_validation_accuracy = val_accuracy

        tensorboard_writer.flush()

        # Exit loop if training not needed
        if not train_data_args['to_train']:
            break

    run_callbacks(callbacks,
                  model=model,
                  optimizer=optimizer,
                  mode=CallbackMode.ON_TRAIN_END)

    logger.info('Finished Training')
    close_tensorboard()
    logger.info(f'Max Validation accuracy is {max_validation_accuracy}')
    return max_validation_accuracy  # Returned in case you later want to add hyperopt.
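
RunningAverage is another helper that is not part of this snippet. Given the "not exact but efficient" comment in the training loop, it is plausibly an exponential moving average; the sketch below is written under that assumption, and the smoothing factor is a guess.

class RunningAverage:
    """Exponential moving average of the per-batch loss."""

    def __init__(self, smoothing=0.98):  # assumed smoothing factor
        self.smoothing = smoothing
        self.value = None

    def add_new_sample(self, sample):
        # The first sample initialises the average; later samples decay into it.
        if self.value is None:
            self.value = sample
        else:
            self.value = (self.smoothing * self.value
                          + (1.0 - self.smoothing) * sample)
        return self.value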