Example #1
import asyncio


async def main():
    # one generation for a single population: mutate, cross over,
    # evaluate fitness, then keep the best candidates
    async def evolution_step(e):
        e.mutation(attempts=4000)
        e.crossover(attempts=4000)
        await e.compute()
        e.selection(eval_param=0.05, save_n_best=3)

    logger = FileLogger('logout')
    iteration, step = 1, 0
    the_best_value = 0.0
    # 'evolution' (the list of populations) and 'stop_step' (the patience, in
    # non-improving iterations) are assumed to be defined elsewhere in the module
    while step < stop_step:
        logger(f"Iteration: {iteration}\n")

        await asyncio.gather(*(evolution_step(e) for e in evolution))
        for e in evolution:
            e.print_info(iter=iteration)

        cur_best_value = max(e.get_best_protein().value for e in evolution)
        if the_best_value < cur_best_value:
            the_best_value = cur_best_value
            step = 0
        else:
            step += 1

        logger(f"The best value: {the_best_value}\n"
               f"Step/Stop {step}/{stop_step}\n\n")

        iteration += 1
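
Example #1 treats a FileLogger instance as a callable and passes it preformatted strings. A minimal sketch of a logger compatible with that usage, assuming only that the constructor takes a file path and each call appends the message (illustrative, not the original class):

class FileLogger:
    """Callable logger that appends each message to a file."""

    def __init__(self, path='logout'):
        self.path = path

    def __call__(self, message):
        # append so repeated calls across iterations never truncate the log
        with open(self.path, 'a') as fh:
            fh.write(message)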
Example #2
import time

import pandas as pd


def parseUSDaily(code):
    # 'usEngine' (the SQLAlchemy engine for the US-stock database) is assumed
    # to be created elsewhere in this module
    path = "C:/project/stockdata/UShistory/%s.csv" % code
    daily = pd.read_csv(path)

    # only retrieve the needed columns
    daily = daily[[
        'timestamp', 'volume', 'open', 'high', 'low', 'close', 'chg',
        'percent', 'turnoverrate', 'amount', 'pe', 'pb', 'ps', 'pcf',
        'market_capital'
    ]]
    daily['ts_code'] = code
    # 'timestamp' is in milliseconds since the epoch; render it as YYYY/MM/DD
    daily['trade_date'] = daily['timestamp'].map(
        lambda x: time.strftime('%Y/%m/%d', time.localtime(x / 1000)))
    daily = daily.drop('timestamp', axis=1)

    # replace literal 'None' strings, then cast each column to its target dtype
    daily = daily.replace(to_replace='None', value='0')
    datatype = {
        "volume": "int64",
        "open": "float64",
        "high": "float64",
        "low": "float64",
        "close": "float64",
        "chg": "float64",
        "percent": "float64",
        "turnoverrate": "float64",
        "amount": "float64",
        "pe": "float64",
        "pb": "float64",
        "ps": "float64",
        "pcf": "float64",
        "market_capital": "float64",
        "ts_code": "object",
        "trade_date": "object"
    }
    daily = daily.astype(datatype)
    daily = daily.drop_duplicates(['ts_code', 'trade_date'])
    daily = daily.set_index(['ts_code', 'trade_date'])

    tableName = "daily" + str(getDBIndex(code))
    print(tableName)
    daily.to_sql(name=tableName, con=usEngine, if_exists="append")

    FileLogger.info("write data to Database successfully on code: %s" % code)
Example #3
    def process(self, max_size=None, logger=None):
        if logger is None:
            logger = FileLogger()
        im = self.load(max_size)
        if im is None:
            logger.result(self.imgpath, {'Error': "Failed to load"},
                          self.bad_qualities, self.compare)
            return None
        remaining = self.measures[:]
        results = {}
        # rudimentary runner that only executes measures whose prerequisite
        # results are already available
        while len(remaining) > 0:
            still_remaining = []
            for mod in remaining:
                # verify prerequisites
                abort = False
                for prereq in mod.requires_result_from:
                    if prereq not in results:
                        still_remaining.append(mod)
                        abort = True
                        break
                if abort:
                    print("Failed to meet req for:", self._get_name(mod))
                    continue

                kwargs = {}
                for name in mod.requires_result_from:
                    kwargs[name] = results[name]
                results[self._get_name(mod)] = self.execute(im, mod, kwargs)

            msg = 'Unresolved dependencies. I have %s to satisfy %s.' % (
                ','.join(results.keys()),
                ','.join(self._get_name(x) + "(" +
                         ','.join(x.requires_result_from) + ")"
                         for x in remaining))

            # if a full pass made no progress, the dependencies cannot be met
            assert len(remaining) != len(still_remaining), msg
            remaining = still_remaining

        logger.result(self.imgpath, results, self.bad_qualities, self.compare)
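
The while loop above is a small fixed-point dependency resolver: each pass executes every measure whose prerequisites are already in results, and the assert fires when a pass makes no progress (a missing or circular dependency). A standalone sketch of the same idea, with a hypothetical Measure class invented for illustration:

class Measure:
    def __init__(self, name, requires=()):
        self.name = name
        self.requires_result_from = list(requires)


# 'sharpness' can only run once 'edges' has produced its result
measures = [Measure('sharpness', requires=['edges']), Measure('edges')]

results, remaining = {}, measures[:]
while remaining:
    still = [m for m in remaining
             if any(r not in results for r in m.requires_result_from)]
    for m in remaining:
        if m not in still:
            results[m.name] = 'result-of-' + m.name  # stand-in computation
    assert len(still) != len(remaining), 'unresolved or circular dependencies'
    remaining = still

print(list(results))  # ['edges', 'sharpness']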
Example #4
def process(process_dir=None):
    if process_dir is None:
        process_dir = [
            v for v in globals().values() if isinstance(v, TrainingData)
        ]

    logger = MultiLogger(
        FileLogger(),
        HtmlLogger(open('output.html', 'w+'), os.path.abspath('thumb')))
    max_size = None  # e.g. (800, 600) to downscale large images before analysis
    # process the 'good' reference images first, then the 'bad' ones
    for obj in process_dir:
        if obj.kind == 'good':
            obj.process(max_size=max_size, logger=logger)
    for obj in process_dir:
        if obj.kind == 'bad':
            obj.process(max_size=max_size, logger=logger)
Example #6
from flask import Flask, jsonify, abort
from config_class import Config
from logger import FileLogger, ConsoleLogger
from collector import DbHandler
config = Config()

# get db name from yaml file
db_name = config.db['db_name']

file_logger = FileLogger()
console_logger = ConsoleLogger()
db_handler = DbHandler(db_name=db_name, index_list='')

app = Flask(__name__)


# get_collections
@app.route('/api/indices', methods=['GET'])
def get_collections():
    return jsonify({'collections': db_handler.get_db_collections()})


# get_by_collection_name
@app.route('/api/index/<index>', methods=['GET'])
def get_collection_data(index):
    is_index_exist(index)  # helper defined elsewhere; expected to abort() for unknown indices
    return jsonify({'results': db_handler.get_collection_data(index)})


# get_last_entry_from_collection
@app.route('/api/index/<index>/last', methods=['GET'])
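def get_last_entry_from_collection(index):
    # NOTE: hypothetical completion: the original listing is truncated at the
    # decorator above. 'get_last_entry' is an assumed DbHandler method name,
    # chosen to follow the pattern of the two routes before it.
    is_index_exist(index)
    return jsonify({'results': db_handler.get_last_entry(index)})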
Example #7
                subprocess.check_call(args + [INTERPRETER_AUTODETECTION_FLAG],
                                      stdout=stdout,
                                      stderr=STDOUT)
            except CalledProcessError:
                continue
            if (INTERPRETER_AUTODETECTION_VERBOSE_FLAG in sys.argv or
                    ALL_AUTODETECTION_VERSION_FLAG in sys.argv):
                print('Re-launching using command line: ' + ' '.join(args))
            os.execv(args[0], args)
    print(
        'Error: One or more required python libraries have not been installed: '
        + str(e))
    sys.exit(1)

if not autodetecting:
    logger.register(ConsoleLogger())
    logger.register(FileLogger())

    if EXECUTABLE_AUTODETECTION_VERSION_FLAG in sys.argv or ALL_AUTODETECTION_VERSION_FLAG in sys.argv:
        import detect
        detect.VERBOSE_DETECT = True

    try:
        PaladinLinuxClient().run(
            mask_exceptions=(EXCEPTION_BACKTRACE_FLAG not in sys.argv and
                             ALL_EXCEPTIONS_BACKTRACE_FLAG not in sys.argv),
            handle_errors=(ALL_EXCEPTIONS_BACKTRACE_FLAG not in sys.argv))
    except KeyboardInterrupt as e:
        print('')
        sys.exit(1)
    finally:
        logger.shutdown()
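
Example #7 registers several sinks (console and file) on one logger object and shuts them down together. A minimal sketch of such a fan-out registry, assuming a log/shutdown style interface (the method names beyond register and shutdown are illustrative, not confirmed by the source):

class LoggerRegistry:
    def __init__(self):
        self._sinks = []

    def register(self, sink):
        self._sinks.append(sink)

    def log(self, message):
        # fan each message out to every registered sink
        for sink in self._sinks:
            sink.log(message)

    def shutdown(self):
        self._sinks.clear()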
Example #8
    print(tableName)
    daily.to_sql(name=tableName, con=usEngine, if_exists="append")

    FileLogger.info("write data to Database successfully on code: %s" % code)


def getDBIndex(code):
    # hash the ticker symbol onto one of 30 shard tables (daily0 .. daily29)
    total = 0
    for ch in code:
        total += ord(ch)
    return total % 30
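
# Sanity check of the sharding arithmetic (ticker chosen for illustration):
# getDBIndex("AAPL") == (65 + 65 + 80 + 76) % 30 == 286 % 30 == 16,
# so AAPL rows would land in table 'daily16'.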


if __name__ == "__main__":
    # Query used to produce code.csv: select ts_code from usstock.stocklist;
    stockdf = pd.read_csv("C:/project/Tushare/usstock/code.csv")
    errordf = pd.read_csv("C:/project/Tushare/usstock/get_error_ts_code.csv")
    errorList = errordf['ts_code'].to_numpy()
    # skip tickers that previously failed to download
    stockList = stockdf[~stockdf['ts_code'].isin(errorList)]
    stockList = stockList['ts_code'].to_numpy()

    for code in stockList:
        FileLogger.info("running on code: %s" % code)
        try:
            parseUSDaily(code)

        except Exception as ex:
            FileLogger.error(ex)
            FileLogger.error("write data to Database error on code: %s" % code)
            time.sleep(1)
Example #9
                        help='how long to wait before shutting down on error')

    parser.add_argument('--short-epoch',
                        action='store_true',
                        help='make epochs short (for debugging)')
    return parser


cudnn.benchmark = True
args = get_parser().parse_args()

# Only want master rank logging to tensorboard
is_master = (not args.distributed) or (dist_utils.env_rank() == 0)
is_rank0 = args.local_rank == 0
tb = TensorboardLogger(args.logdir, is_master=is_master)
log = FileLogger(args.logdir, is_master=is_master, is_rank0=is_rank0)


def main():
    os.system('shutdown -c')  # cancel previous shutdown command
    log.console(args)
    tb.log('sizes/world', dist_utils.env_world_size())

    # need to index validation directory before we start counting the time
    dataloader.sort_ar(args.data + '/validation')

    if args.distributed:
        log.console('Distributed initializing process group')
        torch.cuda.set_device(args.local_rank)
        dist.init_process_group(backend=args.dist_backend,
                                init_method=args.dist_url,
Example #10
OMPI_COMM_WORLD_LOCAL_RANK = os.environ.get('OMPI_COMM_WORLD_LOCAL_RANK', '-1')
IS_CHIEF = (RANK == '0')
print(
    f"*** Debug: {os.uname()[1]} RANK={RANK} local_rank_arg={args.local_rank} LOCAL_RANK={LOCAL_RANK}, OMPI_COMM_WORLD_LOCAL_RANK={OMPI_COMM_WORLD_LOCAL_RANK}, {' '.join(sys.argv)}"
)

pp(dict(os.environ))

# Only want master rank logging to tensorboard
is_master = os.environ.get('RANK', '0') == '0'

# for mpirun the messages are propagated to main machine, so don't log in that case
is_rank0 = (args.local_rank == 0)

tb = TensorboardLogger(args.logdir, is_master=is_master)
log = FileLogger(args.logdir, is_master=is_master, is_rank0=is_rank0)

if args.log_all_workers:
    group_name = args.name
    run_name = args.name + '-' + os.environ.get("RANK", "0")
    wandb.init(project='imagenet18', group=group_name, name=run_name)
    log.console("initializing wandb logging to group " + group_name +
                " name " + run_name)
else:
    if not is_master:
        os.environ['WANDB_MODE'] = 'dryrun'  # all wandb.log calls become no-ops
        log.console("local-only wandb logging for run " + args.name)
    wandb.init(project='imagenet18', name=args.name)
    log.console("initializing logging to run " + args.name)

if hasattr(wandb, 'config') and wandb.config is not None:
    wandb.config['gpus'] = int(os.environ.get('WORLD_SIZE', 1))
Example #11
    running_loss = [0 for _ in range(len(mlps))]
    train_loss = []

    print(f"Fold {k + 1}")
    for epoch in range(epochs):
        for i, (x_batch, y_batch) in enumerate(train_loader):
            x_batch, y_batch = x_batch.to(device), y_batch.to(device)
            optimizer.zero_grad()  # clear the gradients of all N models
            for j, model in enumerate(mlps):
                y_pred = model(x_batch)
                loss = criterion(y_pred, y_batch.squeeze().long())
                loss.backward()
                running_loss[j] += loss.item()
            optimizer.step()
        print(f"Epoch {epoch + 1} / {epochs}..")
        Log.info(f"Epoch {epoch + 1} / {epochs}..")
        for i in range(len(mlps)):
            # loss accumulated so far by model i, averaged per training sample
            avg_loss = running_loss[i] / float(len(train))
            train_loss.append(avg_loss)
            print(f"{i} - Train loss: {avg_loss:.7f}..")
            Log.info(f"{i} - Train loss: {avg_loss:.7f}..")

        for model in mlps:  # the whole ensemble must be in eval mode
            model.eval()

        pre = []
        vote_correct = 0
        mlps_correct = [0 for _ in range(len(mlps))]

        test_loss = [0 for _ in range(len(mlps))]

        with torch.no_grad():
            for i, (x_batch, y_batch) in enumerate(valid_loader):
Example #12
log_text_area.pack(fill=BOTH)

log_text_area.bind('<Command-a>', select_all_text)
# log_text_area.bind('<Control-a>', select_all_text)

# ======== Odometry and Measurement Logging Frame ========
frm_pos = Frame(mw,
                width=511,
                height=330,
                highlightbackground='gray',
                highlightcolor='black',
                highlightthickness=2)
frm_pos.place(x=603, y=430, width=511, height=330)

pos_text_area = ScrolledText(
    master=frm_pos,
    # wrap=mw.WORD,
    highlightthickness=0)
pos_text_area.configure(state='disabled')
pos_text_area.pack(fill=BOTH)

pos_text_area.bind('<Command-a>', select_all_text)

setup_window_control(
    mw, Logger(log_text_area), FileLogger(pos_text_area, LOG_FILE),
    (ax_traj, fig_traj, fig_photo_traj, fig_canvas_agg_traj),
    (ax_vstream, fig_vstream, fig_photo_vstream, fig_canvas_agg_vstream),
    NAO_IP, NAO_PORT)
setup_menu(mw)
mw.mainloop()
Example #13
    def __init__(self, logger=None):
        self.request_count = 0
        self.tasks = {}
        # fall back to a FileLogger when no logger is injected
        self.logger = logger or FileLogger()
        self.logger.log('--------------Server started--------------')
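
Because the constructor falls back to FileLogger only when nothing is injected, tests can pass a stand-in that records messages in memory. A sketch, with the owning class assumed to be named Server (the listing does not show it):

class ListLogger:
    """Hypothetical test double; collects log lines instead of writing a file."""

    def __init__(self):
        self.messages = []

    def log(self, message):
        self.messages.append(message)


server = Server(logger=ListLogger())  # the class name 'Server' is assumed
assert server.logger.messages == ['--------------Server started--------------']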