Example #1
def main():
    args = parser.parse_args()
    if args.directory.endswith(".zip"):
        directory = os.path.abspath(args.directory[0:-len(".zip")])
        if not os.path.exists(directory):
            subprocess.check_call([
                "unzip", args.directory, "-d", os.path.dirname(args.directory)
            ])
    else:
        directory = args.directory

    with open(os.path.join(directory, "manifest.json")) as fh:
        manifest = json.load(fh)

    if manifest["letters"] and not args.skip_letters:
        lfiles, ljobs, lpage = collate_letters(directory, manifest["letters"], 1)
        print "Found", len(ljobs), "letter jobs"
        if ljobs:
            run_batch(args, lfiles, ljobs)

    if manifest["postcards"] and not args.skip_postcards:
        pfiles, pjobs, ppage = collate_postcards(manifest["postcards"], 1)
        print "Found", len(pjobs), "postcard jobs"
        if pjobs:
            run_batch(args, pfiles, pjobs)
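Example #1 (and the near-identical Example #2) assumes a module-level argparse parser plus os, json, and subprocess imports that the listing does not show. A minimal sketch of that setup, inferred only from the attributes main() reads (directory, skip_letters, skip_postcards), might be:

import argparse
import json
import os
import subprocess

# Hypothetical parser, consistent with the attributes used in main() above.
parser = argparse.ArgumentParser()
parser.add_argument("directory", help="input directory or .zip archive")
parser.add_argument("--skip-letters", action="store_true")
parser.add_argument("--skip-postcards", action="store_true")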
Example #2
def main():
    args = parser.parse_args()
    if args.directory.endswith(".zip"):
        directory = os.path.abspath(args.directory[0:-len(".zip")])
        if not os.path.exists(directory):
            subprocess.check_call([
                "unzip", args.directory, "-d",
                os.path.dirname(args.directory)
            ])
    else:
        directory = args.directory

    with open(os.path.join(directory, "manifest.json")) as fh:
        manifest = json.load(fh)

    if manifest["letters"] and not args.skip_letters:
        lfiles, ljobs, lpage = collate_letters(directory, manifest["letters"],
                                               1)
        print "Found", len(ljobs), "letter jobs"
        if ljobs:
            run_batch(args, lfiles, ljobs)

    if manifest["postcards"] and not args.skip_postcards:
        pfiles, pjobs, ppage = collate_postcards(manifest["postcards"], 1)
        print "Found", len(pjobs), "postcard jobs"
        if pjobs:
            run_batch(args, pfiles, pjobs)
Example #3
def main():
    args = parse_args()
    cfg = load_config(args)

    launch_job(cfg=cfg,
               init_method=args.init_method,
               func=benchmark_data_loading)
Example #4
def main():
    """
    Main function to spawn the train and test process.
    """
    args = parse_args()
    cfg = load_config(args)

    # Perform training.
    if cfg.TRAIN.ENABLE:
        launch_job(cfg=cfg, init_method=args.init_method, func=train)

    # Perform multi-clip testing.
    if cfg.TEST.ENABLE:
        launch_job(cfg=cfg, init_method=args.init_method, func=test)
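Examples #3 and #4 both hand a callable to launch_job(cfg=..., init_method=..., func=...). As an illustrative sketch only, not the library's actual implementation, a launcher with that shape could be as simple as:

def launch_job(cfg, init_method, func, daemon=False):
    # Single-process sketch: a real launcher would typically spawn one
    # worker per GPU and pass init_method to the distributed backend.
    func(cfg=cfg)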
Example #5
def main():
    args = parser.parse_args()

    with open(os.path.join(args.directory, "manifest.json")) as fh:
        manifest = json.load(fh)

    if manifest["letters"]:
        lfiles, ljobs, lpage = collate_letters(args.directory, manifest["letters"], 1)
        print "Found", len(ljobs), "letter jobs"
        if ljobs:
            run_batch(args, lfiles, ljobs)

    if manifest["postcards"]:
        pfiles, pjobs, ppage = collate_postcards(manifest["postcards"], 1)
        print "Found", len(pjobs), "postcard jobs"
        if pjobs:
            run_batch(args, pfiles, pjobs)
Example #6
def main():
    args = parser.parse_args()

    with open(os.path.join(args.directory, "manifest.json")) as fh:
        manifest = json.load(fh)

    if manifest["letters"]:
        lfiles, ljobs, lpage = collate_letters(args.directory,
                                               manifest["letters"], 1)
        print "Found", len(ljobs), "letter jobs"
        if ljobs:
            run_batch(args, lfiles, ljobs)

    if manifest["postcards"]:
        pfiles, pjobs, ppage = collate_postcards(manifest["postcards"], 1)
        print "Found", len(pjobs), "postcard jobs"
        if pjobs:
            run_batch(args, pfiles, pjobs)
Example #7
def main(argv):
    """
    Main entry of our list manager

    Parameters:

    - `argv`: array of string

    Return:

    - `int`: 0 is success otherwise failure
    """
    ret = 0
    parser = utils.parser.createParser()

    opt = parser.parse_args(argv)

    try:
        # Stop execution if help flag is on
        if opt.help:
            raise ShowHelpException()

        # Read materials we need for later processing
        cfg = auxiliary.readConfig(opt.config)
        rawJson = auxiliary.readJson(opt.input[0])

        # List we are going to manipulate :)
        commands = rawJson['commands']

        # Pump commands and instruction for filtering, sorting etc
        rawJson['commands'] = pump(commands, cfg)

        # Write final result
        auxiliary.writeJSONFile(rawJson, opt.output[0])

    except ShowHelpException:
        parser.print_help()
        ret = 0
    except FilterException as e:
        ret = 1
        print('ERROR')
        print(e)

    return ret
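Example #7 short-circuits into the help path by raising ShowHelpException, whose definition is not shown. A minimal stand-in (an assumption, purely for self-containedness) would be:

class ShowHelpException(Exception):
    """Hypothetical: raised to abort processing so the caller prints the parser help."""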
Example #8
def parse_args():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "launch_info",
        help="Path to the launch file to analyze and optional arguments",
        nargs="+")
    parser.add_argument("-v",
                        "--verbose",
                        help="increase output verbosity",
                        action="store_true")
    parser.add_argument("-q",
                        "--quiet",
                        help="decrease output verbosity",
                        action="store_true")
    parser.add_argument("-np",
                        "--noplot",
                        help="don't plot the generated html",
                        action="store_true")

    args = parser.parse_args()
    return args
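For illustration, a hedged usage of Example #8's parser (the script and launch-file names are made up; the function also assumes import argparse at module level):

import sys

sys.argv = ["analyze.py", "demo.launch", "--verbose"]
args = parse_args()
print(args.launch_info)  # ['demo.launch']
print(args.verbose)      # True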
Example #9
'''
- 1 Generate batch data during the train loop
  2 Evaluate performance on the test set
- 2021/5/13
'''
import utils.metrics as metrics
from utils.parser import parse_args
from utils.load_data import *
import multiprocessing
import heapq

# Import the parser
# Required arguments: the set of K values for the evaluation metrics; dataset info to build load_data

cores = multiprocessing.cpu_count() // 2

args = parse_args()
Ks = eval(args.Ks)

data_generator = Data(path=args.data_path + args.dataset, batch_size=args.batch_size)
USR_NUM, ITEM_NUM = data_generator.n_users, data_generator.n_items
N_TRAIN, N_TEST = data_generator.n_train, data_generator.n_test
BATCH_SIZE = args.batch_size

def ranklist_by_heapq(user_pos_test, test_items, rating, Ks):
    item_score = {}
    for i in test_items:
        item_score[i] = rating[i]

    K_max = max(Ks)
    K_max_item_score = heapq.nlargest(K_max, item_score, key=item_score.get)
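The function above is cut off right after selecting the top-K_max items. A plausible completion, following the pattern common in NGCF-style evaluation code (an assumption, not the original file), converts the ranking into a binary hit list:

    r = []
    for i in K_max_item_score:
        r.append(1 if i in user_pos_test else 0)
    # AUC is not recoverable from the heap ranking alone in this variant.
    auc = 0.
    return r, auc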
Example #10
def main():
    global args
    args = parse_args()
    train_net(args)
Example #11
def test_get_repo_object():
    (options, args) = parse_args([])
    repo = get_repo_object("utapi", "master", options)
    # The current working directory is the root of this project.
    assert os.path.isdir("./utapi"), "Repository not cloned to project root."
Example #12
import utils as H
from Patient import *
from Waiting_Place import *
from Serve_Place import *
from utils.package import *
from utils import parser

args = parser.parse_args()
p_showup = args.p_showup
walk_in_rate = args.walk_in_rate
arrival_rate_blood = args.arrival_rate_blood
arrival_rate_scan = args.arrival_rate_scan
num_node = args.num_node
trans_prob = args.trans_prob
walk_time = H.walk_time


class Simulation(object):
    '''
    This class, Simulation, defines the relations and processes and runs the whole simulator.
    '''
    def __init__(self,
                 num_node=num_node,
                 trans_prob=trans_prob,
                 walk_time=walk_time):
        # For statistics
        self.all_patient = []
        self.Patient_arrive_blood = []
        self.Patient_served_blood = []
        self.Walk_in_arrive_blood = []
        self.Walk_in_served_blood = []
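The __init__ above is truncated partway through its statistics lists. For illustration only (assuming the rest of the class completes), the simulator would be constructed with the module-level defaults:

sim = Simulation(num_node=num_node, trans_prob=trans_prob, walk_time=walk_time)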
Example #13
def main():
    global args
    args = parse_args()
    predict(args)
Example #14
from utils.parser import parse_args


def run(args):
    from run_nn import run as run_nn
    from run_svm import run as run_svm
    from run_pam import run as run_pam

    run_nn(args)
    run_svm(args)
    run_pam(args)


if __name__ == '__main__':
    run(parse_args().__dict__)
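A small design note on Example #14: parse_args().__dict__ passes the argparse Namespace's attributes on as a plain dict. The built-in vars() is the more idiomatic spelling of the same conversion:

args = vars(parse_args())  # equivalent to parse_args().__dict__
run(args)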
Example #15
import sys
import git
from utils.parser import parse_args
from utils.repo_handler import get_repo_object

(options, args) = parse_args(sys.argv[1:])

# Ensure that all positional arguments are given. (The parser object is not
# in scope here, so exit with a short usage message instead of print_help.)
if len(args) != 3:
    sys.exit("usage: <repo_name> <source_branch> <target_branch>")

repo_name = args[0]
source_branch = args[1]
target_branch = args[2]

try:
    repo = get_repo_object(repo_name, target_branch, options)
except git.exc.GitCommandError as e:
    # Get the git error message from stderr and output the message without
    # extraneous characters.
    message = e.stderr.find("fatal:")
    sys.exit(e.stderr[message:-2])

forward_branch = "forward/" + source_branch
git_cmd = repo.git  # renamed from `git` to avoid shadowing the imported module

# We do not want to use any pre-existing branch.
try:
    git_cmd.branch('-D', forward_branch)
except git.exc.GitCommandError:
    # The branch did not exist yet; nothing to delete.
    pass
Example #16
def main():
    #time.sleep(3600*6.5)
    global args
    args = parse_args()
    train_net(args)
Example #17
def main():
    args = parse_args()
    data_path = '{}experiment_data/{}/{}_{}/'.format(args.data_path,
                                                     args.dataset, args.prepro,
                                                     args.test_method)
    # Load the data class that generates batch_data
    data_generator = Data(data_path, args.batch_size)
    data_config = dict()
    data_config['n_users'] = data_generator.n_users
    data_config['n_items'] = data_generator.n_items

    # Build pretrain_data
    if args.pretrain in [-1]:
        pretrain_data = load_pretrain_data(args)
    else:
        pretrain_data = None

    # Build the model
    if (args.model_type == 'bprmf'):
        model = BPRMF(data_config, pretrain_data, args)

    # Load pretrained model parameters (the full model parameters saved by tf)
    if args.pretrain == 1:
        # TODO
        a = 1
    """
    **********************************************
    Initialize the session (sess)
    """
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    sess = tf.Session(config=config)
    sess.run(tf.global_variables_initializer())
    """
    ********************************************** 
    Training
    """
    loss_log, pre_log, rec_log, ndcg_log, hit_log = [], [], [], [], []
    stopping_step = 0
    should_stop = False

    # Loop over each training epoch
    for epoch in range(args.epoch):
        t1 = time()
        loss, mf_loss, reg_loss = 0., 0., 0.
        n_batch = data_generator.n_train // args.batch_size + 1
        for idx in range(n_batch):
            # Fetch the batch data
            batch_data = data_generator.generate_train_cf_batch(idx)
            # Build the feed_dict
            feed_dict = data_generator.generate_train_feed_dict(
                model, batch_data)
            # run
            _, batch_loss, batch_mf_loss, batch_reg_loss = model.train(
                sess, feed_dict)
            loss += batch_loss
            mf_loss += batch_mf_loss
            reg_loss += batch_reg_loss

        loss /= n_batch
        mf_loss /= n_batch
        reg_loss /= n_batch

        loss_log.append(loss)

        if (np.isnan(loss)):
            print('ERROR:loss is nan')
            sys.exit()

        # Every show_step epochs, run the test and compute evaluation metrics
        show_step = 100
        if (epoch + 1) % show_step != 0:
            # Every args.verbose epochs, print the current epoch's loss info
            if (args.verbose > 0 and epoch % args.verbose == 0):
                print_str='Epoch {} [{:.1f}s]: train loss==[{:.5f}={:.5f}+{:.5f}]'\
                    .format(epoch,time()-t1,loss,mf_loss,reg_loss)
                print(print_str)

            continue
        """
        **********************************************
        Test: compute evaluation metrics
        """
        print('TODO:test')
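Example #17 leaves the test branch as a TODO, and stopping_step / should_stop are initialized but never updated in the code shown. For illustration only, a typical early-stopping shape at this point in the loop (evaluate and args.early_stop are hypothetical names) could be:

        # Hypothetical sketch: evaluate() would compute the ranking metrics
        # (precision/recall/NDCG/hit) over Ks, as in Example #9's helpers.
        rec = evaluate(sess, model, data_generator, Ks)
        if not rec_log or rec > max(rec_log):
            stopping_step = 0  # new best result: reset patience
        else:
            stopping_step += 1
        rec_log.append(rec)
        if stopping_step >= args.early_stop:  # hypothetical flag
            should_stop = True
            break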