Example #1
def lf():
    args = get_args()
    model = select_model(args)
    optimizer = select_optimizer(args, model)
    train_transforms = get_transforms(args)

    train_params = {
        'num_workers': 2,
        'batch_size': args.batch_size,
        'shuffle': True
    }

    train_generator = datasets.ImageFolder(args.root_path + '/' + 'train',
                                           train_transforms)
    train, _ = torch.utils.data.random_split(train_generator, [48000, 12000])

    train_loader = DataLoader(train, pin_memory=True, **train_params)

    criterion = nn.CrossEntropyLoss(reduction='mean')
    lr_finder = LRFinder(model, optimizer, criterion, device="cuda")
    lr_finder.range_test(train_loader,
                         end_lr=10,
                         num_iter=300,
                         step_mode="exp")
    lr_finder.plot()
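Every example on this page assumes a project-local get_args helper that is not shown. As a rough sketch only, a minimal argparse-based version sufficient for Example #1 (the flag names and defaults are assumptions inferred from the fields the example reads, not the original helper) might look like:

# Hypothetical get_args sketch; flags and defaults are assumptions, not the original.
import argparse

def get_args():
    parser = argparse.ArgumentParser(description='LR range test')
    parser.add_argument('--batch_size', type=int, default=64)
    parser.add_argument('--root_path', type=str, default='data')
    return parser.parse_args()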
Example #2
 async def get(self, request, current_user, id_name=None):
     if id_name:
         if id_name.isnumeric():
             user = await Users.get_by_id(int(id_name))
         elif id_name == 'undefined':
             return json({'msg': 'wrong username'}, 404)
         else:
             try:
                 user = await Users.get_first('email', id_name)
             except DoesNotExist:
                 logging.error('No user with e-mail: %s', id_name)
                 return json({'msg': 'user not found'}, 404)
         if current_user.id == user.id:
             return json(await user.get_my_user_data())
         return json(await user.get_public_data())
     else:
         sort_by = 'id'
         if request.args:
             if 'sort_by' in request.args:
                 sort_by = request.args['sort_by'][0]
                 del request.args['sort_by']
             users = await Users.get_by_many_field_value(
                 **get_args(request.args))
         else:
             users = await Users.get_all()
         user = []
         for u in users:
             if current_user.admin or current_user.organiser:
                 user.append(await u.to_dict())
             else:
                 user.append(await u.get_public_data())
         user.sort(key=lambda a: a[sort_by])
     return json(user, sort_keys=True)
Example #3
def main():
    print "Note: This is a slow test, has a couple of sleeps in it to simulate proper state changes"
    args = utils.get_args()
    vt_mysqlbinlog = os.environ.get('VT_MYSQL_ROOT') + '/bin/vt_mysqlbinlog'
    if not os.path.isfile(vt_mysqlbinlog):
        sys.exit(
            "%s is not present, please install it and then re-run the test" %
            vt_mysqlbinlog)

    try:
        suite = unittest.TestSuite()
        if args[0] == 'run_all':
            setup()
            suite.addTests(unittest.TestLoader().loadTestsFromTestCase(
                RowCacheInvalidator))
        else:
            if args[0] != 'teardown':
                setup()
                if args[0] != 'setup':
                    for arg in args:
                        if hasattr(RowCacheInvalidator, arg):
                            suite.addTest(RowCacheInvalidator(arg))
        if suite.countTestCases() > 0:
            unittest.TextTestRunner(verbosity=utils.options.verbose).run(suite)
    except KeyboardInterrupt:
        pass
    except utils.Break:
        utils.options.skip_teardown = True
    finally:
        teardown()
Example #4
def corpus_contents():
    args = get_args()
    ret = {}
    for fullfn, contents in find_files(args.corpusdir, ext='.xd'):
        xdid = parse_xdid(fullfn)
        ret[xdid.lower()] = contents
    return ret
Example #5
async def main():
    args = utils.get_args(get_arguments_parser)
    attempt = 0

    writer = None
    while True:
        try:
            reader, writer = await asyncio.open_connection(host=args.host,
                                                           port=args.port)
            if attempt:
                # "Установлено соединение" = "Connection established"
                print(await write_message_to_file(args.history,
                                                  'Установлено соединение\n'))
                attempt = 0
            message = await get_message_text(reader)
            print(await write_message_to_file(args.history, message))
        except (ConnectionRefusedError, ConnectionResetError, gaierror,
                TimeoutError):
            attempt += 1
            if attempt <= 3:
                # "Нет соединения. Повторная попытка" = "No connection. Retrying"
                error_message = 'Нет соединения. Повторная попытка\n'
                await write_message_to_file(args.history, error_message)
            else:
                # "...через 3 сек." = "...retrying in 3 sec."
                error_message = 'Нет соединения. Повторная попытка через 3 сек.\n'
                print(await write_message_to_file(args.history, error_message))
                await asyncio.sleep(3)
                continue
                continue
        finally:
            if writer is not None:  # the connection may never have opened
                writer.close()
                writer = None
Example #6
def main():
    args = get_args()
    wandb.init()
    wandb.config.update(args)

    seed = 42
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False

    loaded_model = False

    [train_loader, valid_loader, model,
     optimizer] = initialize(args, loaded_model)
    scaler = torch.cuda.amp.GradScaler()

    wandb.watch(model)
    best_acc = 0
    run_avg = RunningAverage()

    # scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, 'min')
    # scheduler = torch.optim.lr_scheduler.CyclicLR(optimizer, base_lr=0.001, max_lr=0.1, cycle_momentum=False)

    for epoch in range(1, args.epochs_number + 1):
        run_avg.reset_train()
        run_avg.reset_val()

        train(args, model, train_loader, epoch, optimizer, scaler, run_avg)
        val_acc = evaluation(args, model, valid_loader, epoch, run_avg)

        # scheduler.step()
        if best_acc < val_acc:
            best_acc = val_acc
            save_checkpoint(model, optimizer, args, epoch)
Example #7
def main():
    args = get_args()

    # Training Generator/Discriminator
    if args.model == 'GAN':
        model = GAN()
    # elif args.model == 'LSGAN':
    #     model = LSGAN()
    # elif args.model == 'WGAN':
    #     model = WGAN()
    # elif args.model == 'WGAN_GP':
    #     model = WGAN_GP()
    # elif args.model == 'DRAGAN':
    #     model = DRAGAN()
    # elif args.model == 'EBGAN':
    #     model = EBGAN()
    # elif args.model == 'BEGAN':
    #     model = BEGAN()
    # elif args.model == 'SNGAN':
    #     model = SNGAN()
    elif args.model == 'AnoGAN':
        model = AnoGAN()
    model.train()

    # Anomaly Detection
    if args.model == 'AnoGAN':
        model.anomaly_detect()
Example #8
def main():
    args = get_args()
    config = load_config('recv', str(args.config))
    init_log('hg-agent-forwarder', args.debug)
    shutdown = create_shutdown_event()

    receivers = []
    udp_recv = MetricReceiverUdp(config)
    receivers.append(udp_recv)

    tcp_recv = MetricReceiverTcp(config)
    receivers.append(tcp_recv)

    for receiver in receivers:
        receiver.start()
        logging.info("Started thread for %s", receiver)

    while not shutdown.is_set():
        time.sleep(5)

    for receiver in receivers:
        while receiver.is_alive():
            receiver.shutdown()
            receiver.join(timeout=0.1)
            time.sleep(0.1)
    logging.info("Metric receivers closed.")
Example #9
 def __init__(self):
     self.data_dir = "data/hymenoptera_data"
     self.args = utils.get_args()
     self.data_transforms = self.data_transforms()
     self.image_datasets = {
         x: datasets.ImageFolder(
             os.path.join(self.data_dir, x), self.data_transforms[x]
         )
         for x in ["train", "val"]
     }
     self.dataloaders = {
         x: torch.utils.data.DataLoader(
             self.image_datasets[x],
             batch_size=self.args.batch_size,
             shuffle=True,
             num_workers=self.args.num_workers,
         )
         for x in ["train", "val"]
     }
     self.dataset_sizes = {
         x: len(self.image_datasets[x]) for x in ["train", "val"]
     }
     self.class_names = self.image_datasets["train"].classes
     self.device = torch.device(
         "cuda:0" if torch.cuda.is_available() else "cpu"
     )
Example #10
def main():
    args = U.get_args()
    logdir = U.setup_logdir(args.exp_name, args.env_name)

    max_path_length = args.ep_len if args.ep_len > 0 else None

    for e in range(args.n_experiments):
        seed = args.seed + 10 * e
        print('Running experiment with seed %d' % seed)

        def train_func():
            train_PG(
                exp_name=args.exp_name,
                env_name=args.env_name,
                n_iter=args.n_iter,
                gamma=args.discount,
                min_timesteps_per_batch=args.batch_size,
                max_path_length=max_path_length,
                learning_rate=args.learning_rate,
                reward_to_go=args.reward_to_go,
                animate=args.render,
                logdir=os.path.join(logdir, '%d' % seed),
                normalize_advantages=not(args.dont_normalize_advantages),
                nn_baseline=args.nn_baseline,
                seed=seed,
                n_layers=args.n_layers,
                size=args.size
                )

        # Awkward hacky process runs, because Tensorflow does not like
        # repeatedly calling train_PG in the same thread.
        p = Process(target=train_func, args=tuple())
        p.start()
        p.join()
Example #12
def main():
    # Get the configuration arguments
    args = utils.get_args()
    utils.print_args(args)

    # Allocate a small fraction of GPU and expand the allotted memory as needed
    gpu_options = tf.GPUOptions(allow_growth=True)
    config = tf.ConfigProto(gpu_options=gpu_options, log_device_placement=False)

    # Set process environment variables; TF_CPP_MIN_LOG_LEVEL '3' silences TF logs
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
    os.environ['CUDA_VISIBLE_DEVICES'] = str(args.gpu)

    # Create a session with config options
    with tf.Session(config=config) as sess:
        # initialize the DNN
        mini = MiniNet(sess, args)

        # Gets all variables that have trainable=True
        model_vars = tf.trainable_variables()

        # slim is a library that makes defining, training, and evaluating NNs simple.
        tf.contrib.slim.model_analyzer.analyze_vars(model_vars, print_info=True)

        if args.training:
            mini.train()
        else:
            mini.test()
Example #13
def main():
    # ---------
    # load data
    args = get_args()
    labels = load(args.labels_path)
    terms = load(args.terms_path)
    pub_med_ids, _ = read_file(args.documents_path)
    index2word = load(args.index2word_path)

    # ------
    # Encode
    params = vars(args)
    params['dropout'] = args.dropout
    params['data_size'] = len(labels)
    params['embedding_dim'] = args.embedding_dim
    params['num_epochs'] = args.num_epochs
    params['batch_size'] = args.batch_size
    params['term_size'] = args.mlp_layer_dims[-1]
    params['word_vecs_path'] = args.embedded_sentences.split('/')[1].split('.')[0]

    # get estimator
    estimator = EncodeEstimator(params)

    # todo
    out_dir = ""

    with h5py.File(args.embedded_sentences, 'r') as f:
        def sen_gen():
            for i in docs_gen(f):
                yield i[0]

        def len_gen():
            for i in docs_gen(f):
                yield i[1]

        if args.test_mode == 2:
            estimator.train(sen_gen, len_gen, labels, 1)
        else:
            estimator.train(sen_gen, len_gen, labels)

        doc_vecs, pred_labels = estimator.predict(sen_gen, len_gen)

    # ---------
    # save data
    # encoder data
    os.makedirs(out_dir)

    # write params to a txt file, except embeddings
    param_dir = out_dir + '/params.txt'
    with open(param_dir, 'w') as f:
        f.write(json.dumps(params))

    pred_lab_words = []
    for p_id, lab in zip(pub_med_ids, pred_labels):
        pred_lab = ', '.join([index2word[terms[l]]
                              for l in lab])
        line = str(p_id) + '\t' + pred_lab
        pred_lab_words.append(line)

    save_list(out_dir + '/pred_labels.txt', pred_lab_words)
Example #14
def main():
    args = utils.get_args()
    if not args.upload_only:
        fetch_spacex(args)
        fetch_hubble(args)
    if not args.download_only:
        publish_images(args)
    clean(args)
Example #15
def construct_wrapper(sfunc, tfunc):
    targs, tkwargs, tdefs = get_args(tfunc)
    sargs, _, _ = get_args(sfunc)
    assert len(targs) >= len(sargs), "%s does not have enough arguments" % tfunc
    targs = targs[len(sargs):]
    if not targs and not tkwargs:
        return tfunc
    else:
        nms = dict(get=conf.get, getd=conf.getdefault, tfunc=tfunc,
                   **{"_%s" % i: de for i, de in (enumerate(tdefs) if tdefs else [])})
        code = "def wrapper({sargs}, chan=None):\n" \
               " tag = self.tag\n" \
               " tfunc({sargs}{targs}{tkwargs})".format(
                   sargs=", ".join(sargs),
                   targs="".join(", get('%s', tag, chan)" % arg for arg in targs),
                   tkwargs="".join(", getd('%s', tag, chan, default=_%s)" % (arg, i) for i, arg in enumerate(tkwargs)) if tkwargs else "")
        exec(code, nms)
        return wraps(tfunc)(nms["wrapper"])
Example #16
 def do(self, s, workspace, project_settings):
     method_name = s.lower().replace(" ", "_")
     method = getattr(self,method_name)
     kwarg_keys = get_args(self, method_name)
     kwargs = dict()
      for k in kwarg_keys:
          kwargs[k] = eval(k)  # resolve each expected argument name from do()'s local scope
     return method(**kwargs)
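The eval(k) above resolves each expected argument name against do()'s local variables, which silently depends on the parameter names matching. A minimal eval-free sketch of the same dispatch pattern, assuming get_args returns the target method's parameter names, could be:

 def do(self, s, workspace, project_settings):
     # Hypothetical eval-free variant: expose the candidate values explicitly.
     method = getattr(self, s.lower().replace(" ", "_"))
     available = {'workspace': workspace, 'project_settings': project_settings}
     kwargs = {k: available[k] for k in get_args(self, method.__name__)
               if k in available}
     return method(**kwargs)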
Example #17
def main():
    """Runs the main deep learning pipeline."""
    try:
        args = get_args()
        config = process_config(args.config)
    except:
        print('Missing or invalid arguments.')
        exit(0)

    weights_path = get_best_weights(config)

    print('Create the model.')
    model = DeepSwipeModel(config)

    print('Loading weights.')
    model.model.load_weights(weights_path)

    print('Opening VideoObject')

    cv2.namedWindow("Preview")
    cap = cv2.VideoCapture(0)

    crop_size = 224

    ACTIVE_LEN = 10
    ACTIVE_WIDTH = crop_size  # todo: change to crop size
    ACTIVE_HEIGHT = crop_size  # todo: change to crop size

    active_frames = np.zeros((ACTIVE_LEN, ACTIVE_HEIGHT, ACTIVE_WIDTH, 3))

    FRAME_WIDTH = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    FRAME_HEIGHT = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))

    while True:
        rval, frame = cap.read()  # read in frame
        if not rval or frame is None:  # guard before cropping an empty frame
            continue
        frame = crop_frame(frame, FRAME_WIDTH, FRAME_HEIGHT,
                           crop_size)  # crop frame
        frame_reshaped = np.expand_dims(frame, axis=0)  # reshape frame

        cv2.imshow("preview", frame)  # show the cropped frame

        active_frames = np.concatenate((active_frames, frame_reshaped),
                                       axis=0)  # add frame
        active_frames = active_frames[1:, :, :, :]  # pop first frame

        now = datetime.datetime.now()
        input_video = np.expand_dims(active_frames, axis=0)
        pred = model.model.predict(input_video)  # add batch_size=1 dimension

        print(str(now), " | ", "Prediction: ", str(pred))

        if cv2.waitKey(1) & 0xFF == ord('q'):
            cap.release()
            break

    cap.release()  # prevents error in [AVCaptureDeviceInput initWithDevice:error:]
Example #18
def main():
    args = utils.get_args()
    dataset = utils.load_dataset(os.path.join(args.data_path, DATASET_FILE))
    index2word, word2index = utils.load_dicts(os.path.join(args.data_path, VOCABULARY_FILE))
    
    print("Use dataset with {} sentences".format(dataset.shape[0]))
    
    batch_size = args.batch_size
    noise_size = args.noise_size
    with tf.Graph().as_default(), tf.Session() as session:   
        lstm_gan = LSTMGAN(
            SENTENCE_SIZE,
            VOCABULARY_SIZE,
            word2index[SENTENCE_START_TOKEN],
            hidden_size_gen = args.hid_gen,
            hidden_size_disc = args.hid_disc,
            input_noise_size = noise_size,
            batch_size = batch_size,
            dropout = args.dropout,
            lr = args.lr,
            grad_cap = args.grad_clip
        )
        
        session.run(tf.initialize_all_variables())

        if args.save_model or args.load_model:
            saver = tf.train.Saver()

        if args.load_model:
            try:
                saver.restore(session, utils.SAVER_FILE)
            except ValueError:
                print("Cant find model file")
                sys.exit(1)
        while True:
            offset = 0.
            for dataset_part in utils.iterate_over_dataset(dataset, batch_size*args.disc_count):
                print("Start train discriminator wih offset {}...".format(offset))
                for ind, batch in enumerate(utils.iterate_over_dataset(dataset_part, batch_size)):
                    noise = np.random.random(size=(batch_size, noise_size))
                    cost = lstm_gan.train_disc_on_batch(session, noise, batch)
                    print("Processed {} sentences with train cost = {}".format((ind+1)*batch_size, cost))

                print("Start train generator...")
                for ind in range(args.gen_count):
                    noise = np.random.random(size=(batch_size, noise_size))
                    cost = lstm_gan.train_gen_on_batch(session, noise)
                    if args.gen_sent:
                        sent = lstm_gan.generate_sent(session, np.random.random(size=(noise_size, )))
                        print(' '.join(index2word[i] for i in sent))
                    print("Processed {} noise inputs with train cost {}".format((ind+1)*batch_size, cost))
                
                offset += batch_size*args.disc_count
                if args.save_model:
                    saver.save(session, utils.SAVER_FILE)
                    print("Model saved")
 def inner(*args, **kwargs):
     args_string_form = get_args(args)
     kwargs_string_form = get_kwargs(kwargs)
     if kwargs_string_form:
         delimiter = '' if not args_string_form else ', '
         print('{0}({1}{2}{3})'.format(func.__name__, args_string_form,
                                       delimiter, kwargs_string_form))
     else:
         print('{0}({1})'.format(func.__name__, args_string_form))
     return func(*args, **kwargs)
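inner above is only the closure body of a call-tracing decorator; the enclosing function and the get_args/get_kwargs formatters are not shown. A self-contained reconstruction of the pattern, with simple stand-in formatters (assumptions, not the originals), might be:

from functools import wraps

def trace(func):
    # Hypothetical enclosing decorator; the inline formatters stand in for
    # the get_args/get_kwargs helpers used in the fragment above.
    @wraps(func)
    def inner(*args, **kwargs):
        args_s = ', '.join(repr(a) for a in args)
        kwargs_s = ', '.join('%s=%r' % kv for kv in kwargs.items())
        delimiter = ', ' if args_s and kwargs_s else ''
        print('{0}({1}{2}{3})'.format(func.__name__, args_s, delimiter, kwargs_s))
        return func(*args, **kwargs)
    return inner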
Example #20
def evaluate():

    args = get_args()
    config = process_config(args.config)
    # load dataset file
    dataset = load_pair_paths(config)

    metric_names = []
    results = []
    model_names = []

    config.unpool_type = "simple"
    config.exp_name = "nyu-resnet-berhu-aug-30-simple-upproject"
    config.prediction_model_name = "model-150-0.19.km"
    config.model_dir = os.path.join("../experiments", config.exp_name, "model/")
    config.tensorboard_dir = os.path.join("../experiments", config.exp_name, "log/")

    metric_names, result = extract(dataset, config)
    results.append(result)
    model_names.append(config.unpool_type + "_"+ config.model_type+ "_" + config.loss_type)

    config.unpool_type = "deconv"
    config.exp_name = "nyu-resnet-berhu-aug-30-deconv-upproject"
    config.prediction_model_name = "model-150-0.21.km"
    config.model_dir = os.path.join("../experiments", config.exp_name, "model/")
    config.tensorboard_dir = os.path.join("../experiments", config.exp_name, "log/")

    metric_names, result = extract(dataset, config)
    results.append(result)
    model_names.append(config.unpool_type + "_"+ config.model_type+ "_" + config.loss_type)

    config.unpool_type = "checkerboard"
    config.exp_name = "nyu-resnet-berhu-aug-30-checkerboard-upproject"
    config.prediction_model_name = "model-150-0.20.km"
    config.model_dir = os.path.join("../experiments", config.exp_name, "model/")
    config.tensorboard_dir = os.path.join("../experiments", config.exp_name, "log/")

    metric_names, result = extract(dataset, config)
    results.append(result)
    model_names.append(config.unpool_type + "_"+ config.model_type+ "_" + config.loss_type)

    config.unpool_type = "resize"
    config.exp_name = "nyu-resnet-berhu-aug-30-resize-upproject"
    config.prediction_model_name = "model-150-0.20.km"
    config.model_dir = os.path.join("../experiments", config.exp_name, "model/")
    config.tensorboard_dir = os.path.join("../experiments", config.exp_name, "log/")

    metric_names, result = extract(dataset, config)
    results.append(result)
    model_names.append(config.unpool_type + "_"+ config.model_type+ "_" + config.loss_type)

    print(metric_names)
    print(results)
    print(model_names)
Example #21
def main():
    global args
    args = get_args()

    print("Creating model...")
    args.saved_args.batch_size = args.beam_width
    net = Model(args.saved_args, True)

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True

    # Make tensorflow less verbose; filter out info (1+) 
    # and warnings (2+) but not errors (3).
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

    with tf.Session(config=config) as sess:

        tf.global_variables_initializer().run()
        saver = tf.train.Saver(net.save_variables_list())

        # Restore the saved variables, replacing the initialized values.
        print("Restoring weights...")
        saver.restore(sess, args.model_path)
        
        max_length = args.n
        beam_width = args.beam_width
        relevance = args.relevance
        temperature = args.temperature
        topn = args.topn

        chat1_states = initial_state_with_relevance_masking(net, sess, relevance)
        chat2_states = initial_state_with_relevance_masking(net, sess, relevance)

        response = args.initial_text # anything to start a dialog
        for i in range(args.count):

            # which test to do
            if args.test_mode == 'QA':
                response = samples.get()
                print('Q:', response)
                chat1_states, response = get_response(response, chat1_states, net, sess, args)
                print('A:', response)

            if args.test_mode == '1CHAT': # one chat bot talking to itself
                print('S1:', response)
                chat1_states, response = get_response(response, chat1_states, net, sess, args)
                print('A1:', response)

            if args.test_mode == '2CHAT': # two chat bots talking
                chat1_states, response = get_response(response, chat1_states, net, sess, args)
                print('12:', response)
                chat2_states, response = get_response(response, chat2_states, net, sess, args)
                print('22:', response)
Example #22
def main():
    # capture the config path from the run arguments
    # then process the json configuration file
    try:
        args = get_args()
        config = process_config(args.config)
    except:
        print("missing or invalid arguments")
        exit(0)

    # create the experiments dirs
    create_dirs([
        config['result_dir'], config['checkpoint_dir'],
        config['checkpoint_dir_lstm']
    ])
    # save the config in a txt file
    save_config(config)
    sess_centralized = tf.Session(config=tf.ConfigProto())
    data = DataGenerator(config)
    model_vae = VAEmodel(config, "Centralized")
    model_vae.load(sess_centralized)
    trainer_vae = vaeTrainer(sess_centralized, model_vae, data, config)
    # here you train your model
    if config['TRAIN_VAE']:
        if config['vae_epochs_per_comm_round'] > 0:
            trainer_vae.train()

    if config['TRAIN_LSTM']:
        # create a lstm model class instance
        lstm_model = lstmKerasModel("Centralized", config)

        # produce the embedding of all sequences for training of lstm model
        # process the windows in sequence to get their VAE embeddings
        lstm_model.produce_embeddings(model_vae, data, sess_centralized)

        # Create a basic model instance
        lstm_nn_model = lstm_model.lstm_nn_model
        lstm_nn_model.summary()  # Display the model's architecture
        # checkpoint path
        checkpoint_path = lstm_model.config['checkpoint_dir_lstm']\
                                        + "cp_{}.ckpt".format(lstm_model.name)
        # Create a callback that saves the model's weights
        cp_callback = tf.keras.callbacks.ModelCheckpoint(
            filepath=checkpoint_path, save_weights_only=True, verbose=1)
        # load weights if possible
        # lstm_model.load_model(lstm_nn_model, config, checkpoint_path)

        # start training
        if config['lstm_epochs_per_comm_round'] > 0:
            lstm_model.train(lstm_nn_model, cp_callback)

    sess_centralized.close()
Example #23
def main():
    # classes
    classes = [
        "plane", "bird", "car", "cat", "deer", "dog", "horse", "monkey",
        "ship", "truck"
    ]
    # models

    args = get_args()
    config = get_config(args)
    for label in classes:
        config.dataset_path = "E:/PycharmProjects/data/cae_96_96_num_1000/" + label
        test(config, label)
Example #24
def main():
    args = utils.get_args()
    ds = datasets.get_coco_kp(args.data_path, 'val', transform.ToTensor())
    data_loader = torch.utils.data.DataLoader(ds,
                                              batch_size=2,
                                              collate_fn=collate_fn)

    eng = engine.Engine.command_line_init(args)
    model = torchvision.models.detection.keypointrcnn_resnet50_fpn(
        pretrained=True)
    model.to(eng.device)
    logger = Logger(eng.output_dir / 'keypoints_rcnn_val.json')
    eng.evaluate(model, data_loader, logger, 0)
    logger.dump()
Example #25
def train():
    # load config file and prepare experiment
    args = get_args()
    config = process_config(args.config)
    create_dirs([config.model_dir, config.tensorboard_dir])

    # load dataset file
    dataset = load_pair_paths(config)

    # split dataset train and test
    train_pairs, test_pairs = split_dataset(config, dataset)

    if config.debug:
        print("WARNING!!! DEBUG MODE ON! 100 training.")
        train_pairs = train_pairs[:100]
        print(train_pairs)
        test_pairs = test_pairs[:100]
        print(test_pairs)

    # Calculate steps for each epoch
    train_num_steps = calculate_num_iter(config, train_pairs)
    test_num_steps = calculate_num_iter(config, test_pairs)

    # Create the model
    model = depth_model(config)

    #set dynamic output shape
    config.output_size = list(model.output_shape[1:])

    # Create train and test data generators
    train_gen = tf_data_generator(config, train_pairs, is_training=True)
    test_gen = tf_data_generator(config,test_pairs, is_training=False)

    # Prepare for training
    model.compile(optimizer=select_optimizer(config), loss=select_loss(config))

    model.fit(
        train_gen,
        steps_per_epoch=train_num_steps,
        epochs=config.num_epochs,
        callbacks=create_callbacks(config),
        validation_data=test_gen,
        validation_steps=test_num_steps,
        verbose=1)

    print("Training Done.")
def main2():
    torch.manual_seed(233)
    torch.cuda.set_device(0)
    args = get_args()
    config = Config(state_dim=args.hidden,
                    input_dim=args.input_dim,
                    hidden=args.hidden,
                    output_dim=args.num_classes,
                    epsilon=args.epsilon)
    checkpoint = torch.load("cog396test_main_episode_280.tr")
    C = models.SimpleNNClassifier(config)
    E = models.Shared_Encoder(config)
    C.load_state_dict(checkpoint['C_state_dict'])
    E.load_state_dict(checkpoint['E_state_dict'])
    C.cuda()
    E.cuda()

    X_eval, Y_eval = xor_data_generate(int(1e3))
    X_eval = X_eval.cuda()
    Y_eval = Y_eval.cuda()

    class_list = []
    x1_list = []
    x2_list = []
    colors = ['red', 'green']
    for i in range(int(1e3)):
        t = C(E(X_eval[i]))
        print("t:", t)
        if t[0][0] > t[0][1]:
            predict_label = 0
            class_list.append(0)

        else:
            predict_label = 1
            class_list.append(1)
        print("prediction:", predict_label)
        print("real label:", Y_eval[i])
        x1 = float(X_eval[i][0].cpu())
        x2 = float(X_eval[i][1].cpu())
        # print("x1:", x1)
        # print("x2:", x2)
        x1_list.append(x1)
        x2_list.append(x2)

    # fig = plt.figure(figsize=(8, 8))
    plt.scatter(x1_list,
                x2_list,
                c=class_list,
                cmap=matplotlib.colors.ListedColormap(colors))
    plt.savefig("train_c_280.png")
Example #27
def main():
    """Runs the main deep learning pipeline."""
    try:
        args = get_args()
        config = process_config(args.config)
    except:
        print('Missing or invalid arguments.')
        exit(0)

    print('Create experiment directories.')
    create_dirs([
        config.callbacks.tensorboard_log_dir, config.callbacks.checkpoint_dir
    ])

    # TODO Refactor this
    print('Create partitions and labels.')
    partition = {}
    all_ids = [
        filename.split('.')[0] for filename in os.listdir('data')
        if filename.endswith('.npy')
    ]
    partition['train'] = all_ids[50:]
    partition['validation'] = all_ids[:50]

    labels_ids = [
        filename.split('.')[0] for filename in os.listdir('data')
        if filename.endswith('.npy')
    ]
    labels_values = [1 if 'swipe_positive_right' in filename \
                     else -1 if 'swipe_positive_left' in filename \
                     else 0 for filename in os.listdir('data') if filename.endswith('.npy')]
    labels = dict(zip(labels_ids, labels_values))

    print('Create the training and validation data generators.')
    training_generator = DeepSwipeDataGenerator(config, partition['train'],
                                                labels)
    validation_generator = DeepSwipeDataGenerator(config,
                                                  partition['validation'],
                                                  labels)
    data_generator = (training_generator, validation_generator)

    print('Create the model.')
    model = DeepSwipeModel(config)

    print('Create the trainer')
    trainer = DeepSwipeTrainer(model.model, data_generator, config)

    print('Start training the model.')
    trainer.train()
Example #28
def main():
    args = get_args()
    config = load_config('forwarder', str(args.config))
    init_log('hg-agent-forwarder', args.debug)
    shutdown = create_shutdown_event()
    logging.info("Metric forwarder starting.")

    metric_forwarder = MetricForwarder(config, shutdown)
    metric_forwarder.start()
    while not shutdown.is_set():
        time.sleep(5)

    logging.info("Metric forwarder shutting down")
    metric_forwarder.shutdown()
    logging.info("Metric forwarder finished.")
Example #29
def init_weight(weight, params, cons_method='abs'):
    name = get_name(params)
    coeff = get_args(params)
    if name == 'output':
        divider = weight.size(1)
    elif name == 'input':
        divider = weight.size(0)
    else:
        raise ValueError("unknown weight name: %s" % name)  # avoid an unbound divider below

    lim = coeff / divider
    #print('coeff=%.4e'%(coeff))
    #print('lim=%4e'%(lim))
    if cons_method == 'force':
        torch.nn.init.uniform_(weight, 0.0, 2 * lim)
    else:
        torch.nn.init.uniform_(weight, -lim, lim)
Example #30
def get_act_func(params):
    name = get_name(params)
    coeff = get_args(params)
    if name == 'none':
        return lambda x: x
    elif name in ['relu', 'ReLU']:
        return lambda x: coeff * F.relu(x)
    elif name in ['tanh']:
        return lambda x: coeff * torch.tanh(x)
    elif name == 'relu_tanh':
        return lambda x: coeff * F.relu(torch.tanh(x))
    elif name == 'relu+tanh':
        return lambda x: coeff * F.relu(x) + (1.0 - coeff) * F.relu(
            torch.tanh(x))
    elif name == 'e_1':
        return lambda x: F.relu(x)
    elif name == 'i_1':
        return lambda x: (F.relu(x))**0.6
    elif name == 'e_2':
        return lambda x: (F.relu(x))**0.9
    elif name == 'i_2':
        return lambda x: 5 * (F.relu(x))**0.6
    elif name == 'e_3':
        if coeff == '':
            coeff = 0.1
        return lambda x: coeff * F.relu(x) + (1.0 - coeff) * F.relu(
            torch.tanh(x))
    elif name == 'i_3':
        if coeff == '':
            coeff = 0.2
        return lambda x: coeff * F.relu(x) + (1.0 - coeff) * F.relu(
            torch.tanh(x))

    # old e4_i4 == e3(0.1, thres=1.0)_i3(0.2)
    elif name == 'e_4':
        if coeff == '':
            coeff = 0.2
        return lambda x: coeff * F.relu(x)
    elif name == 'i_4':
        if coeff == '':
            coeff = 0.4
        return lambda x: coeff * F.relu(x)
Example #31
def main():
  args = utils.get_args()

  try:
    if args[0] != 'teardown':
      setup()
      if args[0] != 'setup':
        for arg in args:
          globals()[arg]()
          print "GREAT SUCCESS"
  except KeyboardInterrupt:
    pass
  except utils.Break:
    utils.options.skip_teardown = True
  finally:
    teardown()
Example #32
def main():
    args = utils.get_args()
    
    print("Prepare dataset...")
    mnist = input_data.read_data_sets("mnist/", one_hot = True)
    
    with tf.Graph().as_default(), tf.Session() as session:
        autoencoder = Autoencoder(
            784, args.hid_shape, args.lat_shape,
            optimizer = tf.train.AdagradOptimizer(args.lr),
            batch_size = args.batch_size,
            dropout = args.dropout)
        
        session.run(tf.initialize_all_variables())

        if args.save_model or args.load_model:
            saver = tf.train.Saver()

        if args.load_model:
            try:
                saver.restore(session, utils.SAVER_FILE)
            except ValueError:
                print("Cant find model file")
                sys.exit(1)
                
        if args.make_imgs:
            index = 0
            print("Prepare images directory...")
            utils.prepare_image_folder()
            example = utils.get_example(args.digit, mnist.test)
            
        print("Start training...")
        for epoch in range(args.epoches):
            for i, batch in enumerate(utils.gen_data(args.batch_size, mnist.train.images)):
                autoencoder.fit_on_batch(session, batch)
                if (i+1) % args.log_after == 0:
                    test_cost = autoencoder.evaluate(session, mnist.test.images)
                    print("Test error = {0:.4f} on {1} batch in {2} epoch".format(test_cost, i+1, epoch+1))
                    
                    if args.make_imgs:
                        path = os.path.join(utils.IMG_FOLDER, "{0:03}.png".format(index))
                        autoencoded = autoencoder.encode_decode(session, example.reshape(1, 784))
                        utils.save_image(autoencoded.reshape((28, 28)), path)
                        index += 1
            if args.save_model:
                saver.save(session, utils.SAVER_FILE)
                print("Model saved")
Example #33
 def fit_transform(self, X_mat, y, dataset_name):
     """To be fit method of filter_chain class"""
     filters = self.filters
     selection_module = importlib.import_module('feature.selection')
     X_filt, y_filt = X_mat, y
     i = 1
     if len(filters) < 1:
         pass
     else:
         for entry in filters:
             #print "\t[" + dataset_name + "] Performing model selection (" + str(i) + '/' + str(len(filters)) + "): " + filter_name
             filter_name = list(entry.keys())[0]
             filter_class = getattr(selection_module, filter_name)
             fit_args = get_args(filter_class, 'fit')
             additional_args = filter(lambda x: x not in ['X_mat', 'y'],
                                      fit_args)
             kwargs = dict()
             for arg in additional_args:
                 kwargs[arg] = getattr(self, arg)
             filter_instance = entry[filter_name]['initialized_manipulator']
             le = self.leak_enforcer
             leak_exists = le.check_for_leak(X_filt)
             leak_allowed = le.check_leak_allowed(filter_name)
             if leak_exists:
                 if leak_allowed:
                     # TODO: route messages like this to a log file to keep a record
                     # Leak is allowed for this filter; CV metrics will be invalid.
                     X_dev, y_dev = X_filt, y_filt
                 else:
                     # Leak found for this manipulator; removing leaked indices.
                     X_dev, y_dev = le.remove_leaking_indices(
                         X_filt, y_filt)
             else:
                 X_dev, y_dev = X_filt, y_filt
             filter_instance.fit(X_dev, y_dev, **kwargs)
             features = filter_instance.features
             reindexed_features = filter_instance.reindex(features)
             #filter_instance.features = reindexed_features
             setattr(filter_instance, 'features', reindexed_features)
             filter_instance.output_features()
             _, X_filt, y_filt, _ = filter_instance.split(X_filt, y)
             # TODO: pandas dependent
             assert True not in pd.isnull(X_filt).any(1).value_counts()
             X_filt.columns = filter_instance.features.keys()
             i += 1
     return X_filt, y_filt
Example #34
 async def get(self, request, id_name=None):
     if isinstance(id_name, int):
         user = await Users.get_by_id(id_name)
         user = await user.to_dict()
     elif isinstance(id_name, str):
         user = await Users.get_first('email', id_name)
         user = await user.to_dict()
     else:
         if request.args:
             users = await Users.get_by_many_field_value(
                 **get_args(request.args))
         else:
             users = await Users.get_all()
         user = []
         for u in users:
             user.append(await u.to_dict())
     return json(user)
Example #35
def main():
    """ Parses any arguments entered by the user. """
    if utils.is_NOT_root_user():
        print "You don't have the correct privileges to run dotdesk. " \
               "Please run as 'root' user."
        sys.exit()

    args = utils.get_args()
    
    if args["flag"] in ("-i", "--install"):
        install(args)
    elif args["flag"] in ("-r", "--remove"):
        remove(args)
    elif args["flag"] in ("-h", "--help"):
        print HELP_INFO
    else:
        print "Try: 'dotdesk -h' for more info."
        sys.exit()
Example #36
def main():
  args = utils.get_args()
  vt_mysqlbinlog =  os.environ.get('VT_MYSQL_ROOT') + '/bin/vt_mysqlbinlog'
  if not os.path.isfile(vt_mysqlbinlog):
    sys.exit("%s is not present, please install it and then re-run the test" % vt_mysqlbinlog)

  try:
    if args[0] != 'teardown':
      setup()
      if args[0] != 'setup':
        for arg in args:
          globals()[arg]()
          print "GREAT SUCCESS"
  except KeyboardInterrupt:
    pass
  except utils.Break:
    utils.options.skip_teardown = True
  finally:
    teardown()
Example #37
# -*- coding: utf-8 -*-

import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.nn import CrossEntropyLoss
import os

from net import CliqueNet
from utils import get_args, get_dataloader

if __name__ == "__main__":

    args = get_args()
    train_loader, test_loader = get_dataloader(args)
    use_cuda = args.use_cuda
    num_classes = 10
    dropout_prob = 0.1
    #hyper-parameters

#    A,B,C,D,E,r = 32,32,32,32,10,args.r # a classic CapsNet
    model = CliqueNet(3, num_classes, 4, 36, attention=True, compression=True, dropout_prob=dropout_prob)
    criterion = CrossEntropyLoss()
    #closs = CrossEntropyLoss()

    with torch.cuda.device(args.gpu):
#        print(args.gpu, type(args.gpu))
        if args.pretrained:
            model.load_state_dict(torch.load(args.pretrained))
        if use_cuda:
Example #38
def run_pbp_download(args, lm=None):
    download_relay(args, lm)


def run_pbp_parser(args, lm=None):
    parse_main(args, lm)


def run_pfx_download(args, lm=None):
    download_pfx(args, lm)


if __name__ == "__main__":
    args = []  # m_start, m_end, y_start, y_end
    options = []  # onlyConvert, onlyDownload, onlyPFXDownload
    parser = get_args(args, options)

    # option : -c, -d, -p
    if (options[0] is True) and (options[1] is False) and (options[2] is False):
        relay_lm = logManager.LogManager()  # background logger
        run_pbp_parser(args, relay_lm)
        relay_lm.killLogManager()
    elif (options[0] is False) and (options[1] is True) and (options[2] is False):
        download_lm = logManager.LogManager()  # background logger
        run_pbp_download(args, download_lm)
        download_lm.killLogManager()
    elif (options[0] is False) and (options[1] is False) and (options[2] is True):
        pfx_lm = logManager.LogManager()  # background logger
        run_pfx_download(args, pfx_lm)
        pfx_lm.killLogManager()
    elif (options[0] is False) and (options[1] is False) and (options[2] is False):
Example #39
        answers[posdir[0] + str(posnum)] = answer

    try:
        for number, clue in puzzle.clues.across():
            cluenum = "A" + str(number)
            if cluenum not in answers:
                raise xdfile.IncompletePuzzleParse(xd, "Clue number doesn't match grid: " + cluenum)
            xd.clues.append((("A", number), decode(clue), answers.get(cluenum, "")))

        # xd.append_clue_break()

        for number, clue in puzzle.clues.down():
            cluenum = "D" + str(number)
            if cluenum not in answers:
                raise xdfile.IncompletePuzzleParse(xd, "Clue doesn't match grid: " + cluenum)
            xd.clues.append((("D", number), decode(clue), answers.get(cluenum, "")))
    except KeyError as e:
        raise xdfile.IncompletePuzzleParse(xd, "Clue doesn't match grid: " + str(e))

    return xd

if __name__ == "__main__":
    import sys
    from utils import get_args, find_files

    args = get_args(desc='parse .puz files')
    for fn, contents in find_files(*sys.argv[1:]):
        xd = parse_puz(contents, fn)
        print(xd.to_unicode())

Example #40
    def load_app(self, ns, on_value=None, **prepost_kwargs):
        """Load annotated callbacks and from a namespace and add them
        to this client's listener's callback chain.

        :param ns: A namespace-like object containing functions marked with
            @event_callback (can be a module, class or instance).
        :params str on_value: id key to be used for registering app callbacks
            with `EventListener`
        """
        listener = self.listener
        name = utils.get_name(ns)
        app = self._apps.get(name, None)
        if not app:
            # if handed a class, instantiate appropriately
            app = ns() if isinstance(ns, type) else ns
            prepost = getattr(app, 'prepost', False)
            if prepost:
                args, kwargs = utils.get_args(app.prepost)
                funcargs = tuple(weakref.proxy(getattr(self, argname))
                                 for argname in args if argname != 'self')
                ret = prepost(*funcargs, **prepost_kwargs)
                if inspect.isgenerator(ret):
                    # run init step
                    next(ret)
                    app._finalize = ret

            # assign a 'consumer id'
            cid = on_value if on_value else utils.uuid()
            self.log.info("Loading call app '{}' for listener '{}'"
                          .format(name, listener))
            icb, failed = 1, False
            # insert handlers and callbacks
            for ev_type, cb_type, obj in marks.get_callbacks(app):
                if cb_type == 'handler':
                    # TODO: similar unloading on failure here as above?
                    listener.add_handler(ev_type, obj)

                elif cb_type == 'callback':
                    # add default handler if none exists
                    if ev_type not in listener._handlers:
                        self.log.info(
                            "adding default session lookup handler for event"
                            " type '{}'".format(ev_type)
                        )
                        listener.add_handler(
                            ev_type,
                            listener.lookup_sess
                        )
                    added = listener.add_callback(ev_type, cid, obj)
                    if not added:
                        failed = obj
                        listener.remove_callbacks(cid, last=icb)
                        break
                    icb += 1
                    self.log.debug("'{}' event callback '{}' added for id '{}'"
                                   .format(ev_type, obj.__name__, cid))

            if failed:
                raise TypeError("app load failed since '{}' is not a valid "
                                "callback type".format(failed))
            # register locally
            self._apps[name] = app
            app.cid, app.name = cid, name

        return app.cid
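Per the docstring, ns may be a module, class, or instance whose callbacks are marked with @event_callback. A hypothetical caller (the app class, event name, and client variable below are invented for illustration) would register it roughly like:

# Hypothetical usage sketch; MyCallApp, the event name, and client are illustrative.
class MyCallApp:
    @marks.event_callback('CHANNEL_ANSWER')
    def on_answer(self, sess):
        print('call answered')

cid = client.load_app(MyCallApp, on_value='my-call-app')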
Example #41
import logging, os
from logging.handlers import RotatingFileHandler
import utils

logger = logging.getLogger(__name__)
args = utils.get_args()
logfile = args.log
if not logfile or logfile == "":
    logfile = 'bitcasafilelister.log'

maxsize = 1 * 1024 * 1024  # 1 MB

LOGLEVEL = logging.INFO
if args.verbose or args.test:
    LOGLEVEL = logging.DEBUG
logger.setLevel(LOGLEVEL)

lFormat = logging.Formatter('%(asctime)s [%(threadName)s][%(levelname)s]: %(message)s', '%m/%d/%Y %I:%M:%S')

if not args.test:
    # file logger
    filehandler = RotatingFileHandler(logfile, maxBytes=maxsize, backupCount=5)
    filehandler.setLevel(LOGLEVEL)
    filehandler.setFormatter(lFormat)
    logger.addHandler(filehandler)
    if os.path.isfile(logfile):
        logger.handlers[0].doRollover()


if args.console or args.test:
    # Console logger