Example #1
def main():
    args = process_args()
    os.environ["CUDA_VISIBLE_DEVICES"] = args.cuda
    if args.seed == -1:
        RANDOMSEED = None
    else:
        RANDOMSEED = args.seed
    torch.manual_seed(RANDOMSEED)
    torch.cuda.manual_seed(RANDOMSEED)
    embedding = 'random'
    torch.backends.cudnn.deterministic = True

    config = Config(args, embedding, 'POR')
    start_time = time.time()
    print("Loading data...")
    vocab, train_data, test_data = build_adaptive_dataset(config, args.word)

    train_iter2 = build_iterator(train_data, config, doubly_flag=False)
    test_iter = build_test_iterator(test_data, config)
    print("Time used: ", get_time_dif(start_time))

    config.n_vocab = len(vocab)
    model = POR(config).cuda()
    init_model(model)
    print("start training...")
    train_por(config, model, train_iter, train_iter2, test_iter)  # note: train_iter is not built within this excerpt
Example #2
def main():
    args = process_args()
    os.environ["CUDA_VISIBLE_DEVICES"] = args.cuda
    if args.seed == -1:
        RANDOMSEED = None
    else:
        RANDOMSEED = args.seed
    torch.manual_seed(RANDOMSEED)
    torch.cuda.manual_seed(RANDOMSEED)
    IMP_WEIGHT = args.imp_weight
    if not IMP_WEIGHT:
        imp = torch.ones(args.class_num - 1, dtype=torch.float)
    elif IMP_WEIGHT == 1:
        pass  # imp is never assigned in this excerpt for this case; see the sketch after this example
    else:
        raise ValueError('Incorrect importance weight parameter.')
    imp = imp.cuda()
    embedding = 'random'
    torch.backends.cudnn.deterministic = True

    config = Config(args, embedding, 'OR')
    start_time = time.time()
    print("Loading data...")
    vocab, train_data, test_data = build_dataset(config, args.word)
    train_iter = build_iterator(train_data, config, doubly_flag=False)
    # train_iter = buil_random_iterator(train_data,config)
    test_iter = build_test_iterator(test_data, config)

    print("Time used: ", get_time_dif(start_time))

    config.n_vocab = len(vocab)
    model = OR(config).cuda()
    init_model(model)
    print("start training...")
    train_or(config, model, train_iter, test_iter, imp)
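
Note: when IMP_WEIGHT == 1 the excerpt above never assigns imp before imp.cuda() is called. A minimal sketch of one way the weights could be derived for that branch, assuming a tensor of training labels and args.class_num are available (the frequency-based scheme below is an illustration, not the original implementation):

import torch


def task_importance_weights(train_labels, class_num):
    # Weight each binary threshold task by the sqrt of its minority-class count.
    labels = torch.as_tensor(train_labels)
    imp = torch.zeros(class_num - 1, dtype=torch.float)
    for k in range(class_num - 1):
        num_pos = (labels > k).sum().float()   # examples above threshold k
        num_neg = (labels <= k).sum().float()  # examples at or below threshold k
        imp[k] = torch.sqrt(torch.min(num_pos, num_neg))
    return imp / imp.max()  # normalize so the largest weight is 1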
Example #3
 def __init__(self, inifile):
     c = ConfigParser.ConfigParser()
     c.read(inifile)
     uri = c.get("db", "uri")
     model.init_model(model.sa.create_engine(uri))
     self.networks = c.get("scan", "networks").split(";")
     self.nmap_sudo = asbool(c.get("nmap", "use_sudo"))
Example #4
 def __init__(self, inifile):
     c = ConfigParser.ConfigParser()
     c.read(inifile)
     uri = c.get("db","uri")
     model.init_model(model.sa.create_engine(uri))
     self.networks = c.get("scan", "networks").split(";")
     self.nmap_sudo = asbool(c.get("nmap", "use_sudo"))
Example #5
	def __init__(self):
		self.engine = create_engine(config.engine_url, **config.engine_params)
		init_model(self.engine)

		self.log = logging.getLogger('batch.%s' % (self.__class__.__name__,))

		self.opt_parser = OptionParser()
		self.opt_parser.add_option("-v", "--verbose", dest="verbose", action="store_true", default=False)
		self.arguments()
Example #6
def compute_features(keyframes, path_out, layer='conv5_1', max_dim=340):
    """
    store all local features from conv5_1 of vgg16 at 340 mac res and
    store then in path_data/[mode]/layer/max_dim
    """

    # create folder if it does not exist
    if not os.path.exists(path_out):
        os.makedirs(path_out)

        # init model
        model = init_model(layer)
        # message
        desc_text = "Feature extraction --> Layer: {}, Max_dim: {}, total_images={}".format(
            layer, max_dim, keyframes.shape[0])
        # process keyframes
        for k, keyframe in tqdm(enumerate(keyframes),
                                ascii=True,
                                desc=desc_text):
            feats = model.predict(preprocess_image(
                keyframe, max_dim=max_dim)).squeeze(axis=0)
            np.save(os.path.join(path_out, "{}".format(k)), feats)

    # resume computation
    else:
        computed = np.array(
            [int(k.split('.')[0]) for k in os.listdir(path_out)])
        # if all features have already been computed...
        if computed.shape[0] == keyframes.shape[0]:
            return path_out
        # start from the last computed...
        elif computed.shape[0] == 0:
            last = 0
        else:
            last = np.sort(computed)[::-1][0]

        # init model
        model = init_model(layer)
        desc_text = "Feature extraction --> Layer: {}, Max_dim: {}, total_images={}".format(
            layer, max_dim, keyframes.shape[0] - last)
        for k, keyframe in tqdm(enumerate(keyframes[last:]),
                                ascii=True,
                                desc=desc_text):
            feats = model.predict(preprocess_image(
                keyframe, max_dim=max_dim)).squeeze(axis=0)
            k += last
            np.save(os.path.join(path_out, "{}".format(k)), feats)

    return path_out
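
A possible call site for compute_features, followed by loading one of the saved descriptors (the keyframes array and output path below are illustrative):

import os
import numpy as np

# keyframes is assumed to be the array of frames expected by preprocess_image
out_dir = compute_features(keyframes, 'path_data/val/conv5_1/340',
                           layer='conv5_1', max_dim=340)
feats_0 = np.load(os.path.join(out_dir, '0.npy'))  # descriptor of the first keyframe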
Example #7
def init_classifier():
    global model
    global t
    model = init_model()
    model.load_weights(model_file_name)
    with open('tokenizer.pickle', 'rb') as handle:
        t = pickle.load(handle)
Example #8
def main():
    print("loading expert actions")
    expert_behavior = 'data/cloning/experts/{}.p'.format(args.envname)
    expert_policy = args.expert_policy_file

    observations, actions = parse_expert.actions(expert_behavior)

    obs_shape, action_shape = parse_expert.model_shapes(expert_behavior)

    actions = actions.reshape(-1, action_shape)
    print("standardize training data")

    obs_mean, obs_sdev = parse_expert.standardizers(expert_policy)
    obs_std = normalize(observations, obs_mean, obs_sdev)

    model = init_model(obs_shape, action_shape)

    results = defaultdict(list)
    for i in range(11):
        result = render_and_eval(model, obs_shape, action_shape, obs_mean,
                                 obs_sdev)
        results['clone_epoch'].append(result)
        model.fit(obs_std, actions, epochs=5, batch_size=256)

    results['expert'] = parse_expert.returns(expert_behavior)
    results['epochs_per_result'] = 5

    pickle.dump(results, open("results/cloning/{}.p".format(args.envname),
                              "wb"))
Example #9
def train(cfg_name, resume):
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    print(f'running on {device}')
    cfg = load_cfg(cfg_name)
    log = Logger(device=device)
    envs = make_vec_envs(**cfg['env'])
    model, n_start = init_model(cfg, envs, device, resume)
    runner = EnvRunner(rollout_size=cfg['train']['rollout_size'],
                       envs=envs,
                       model=model,
                       device=device)
    optim = ParamOptim(**cfg['optimizer'], params=model.parameters())
    agent = Agent(model=model, optim=optim, **cfg['agent'])

    cp_iter = cfg['train']['checkpoint_every']
    log_iter = cfg['train']['log_every']
    n_end = cfg['train']['steps']
    cp_name = cfg['train']['checkpoint_name']

    for n_iter, rollout in zip(trange(n_start, n_end), runner):
        agent_log = agent.update(rollout)

        if n_iter % log_iter == 0:
            log.output({**agent_log, **runner.get_logs()}, n_iter)

        if n_iter > n_start and n_iter % cp_iter == 0:
            f = cp_name.format(n_iter=n_iter // cp_iter)
            torch.save(model.state_dict(), f)
Example #10
def train(img_dir, coords_file, model_file):
    """
    Trains model on images from IMG folder with coordinates from COORDS
    csv file and saves trained model in hdf5 file MODEL
    """
    coords = read_coords(coords_file)

    X = []
    y = []
    for filename in tqdm(coords.keys(), desc='Reading data'):
        img = imread(join(img_dir, filename), as_grey=True)
        points = coords[filename]

        height, width = img.shape
        img = resize(img, (100, 100))
        points[::2] *= 100 / width
        points[1::2] *= 100 / height

        X.append(img)
        y.append(points)

    X = np.array(X).reshape((-1, 100, 100, 1))
    y = np.array(y)

    X = center_circle(X)

    model = init_model()
    model.fit(X, y, batch_size=128, epochs=1)
    model.save(model_file)
Example #11
def load():
    encoder = tfds.features.text.SubwordTextEncoder.load_from_file(
        "encoder.tf")
    model = init_model(encoder.vocab_size)
    model.load_weights("weights/weights")

    print(encoder)
    print(model)
    return encoder, model
Example #12
def render(cfg_name, steps):
    cfg = load_cfg(cfg_name)
    cfg['env']['num'] = 1
    env = make_vec_envs(**cfg['env'])
    model, n_start = init_model(cfg, env, 'cpu', resume=True)
    assert n_start > 0
    model.eval()
    print(f'running {n_start}')

    obs = env.reset()
    for n_iter in trange(steps):
        with torch.no_grad():
            a = model(obs)[0].sample().unsqueeze(1)
        obs = env.step(a)[0]
        env.render()
        time.sleep(1 / 30)
Example #13
def main():
    torch.manual_seed(1)
    os.environ['CUDA_VISIBLE_DEVICES'] = gpu_devices
    use_gpu = torch.cuda.is_available()
    if use_cpu: use_gpu = False

    if use_gpu:
        print("Currently using GPU {}".format(gpu_devices))
        cudnn.benchmark = True
        torch.cuda.manual_seed_all(1)
    else:
        print("Currently using CPU (GPU is highly recommended)")

    print("Initializing model: {}".format(arch))
    model = init_model(name=arch, num_classes=576, loss_type=loss_type)
    print("Model size: {:.3f} M".format(count_num_param(model)))
    if use_gpu:
        model = nn.DataParallel(model).cuda()

    optimizer = init_optim(optim, model.parameters(), lr, weight_decay)
    scheduler = lr_scheduler.MultiStepLR(optimizer,
                                         milestones=stepsize,
                                         gamma=gamma)

    model.train()
    cnt = 0
    for epoch in range(start_epoch, max_epoch, 2):
        for step in range(2):
            x = torch.randn(1, 3, 200, 200)
            y = torch.randint(low=0, high=576, size=(1, ), dtype=torch.int64)
            if use_gpu:
                x = x.cuda()
                y = y.cuda()
            scheduler.step()
            cnt += 1
            print(cnt, scheduler.get_lr())
            output = model(x)
            # loss = nn.CrossEntropyLoss()(output[0], y)
            loss = torch.tensor(0.0, dtype=torch.float32).cuda()
            # loss = torch.tensor(0.0, dtype=torch.float32)
            loss.requires_grad = True
            optimizer.zero_grad()
            loss.backward()
            print(loss)
            print(loss._grad)
            optimizer.step()
    print('Done.')
Example #14
def train():
    # TODO: Move this to config
    test_ratio = 1 / 4

    # Receiving clean data arrays here
    print("Parsing data...")
    train_text, train_lbls, test_text, test_lbls, len1, len2 = parser2.get_clean_data(
    )

    # Shuffle data for more plain distribution

    print(test_text.shape)
    print(test_lbls.shape)
    print(train_text.shape)
    print(train_lbls.shape)

    # TODO: Move this to config
    print("Compiling model...")
    # Init optimizer, loss function and metrics
    optimizer = Adam(lr=0.1)
    loss = 'sparse_categorical_crossentropy'
    metrics = ['accuracy']  # ['categorical_accuracy']

    m = model.init_model(len1, len2)
    m.compile(optimizer=optimizer, loss=loss, metrics=metrics)

    # TODO: Move this to config
    print("Establishing checkpoints...")
    checkpoint_path = "training/cp.ckpt"

    # Train model with new callback
    print("Training model...")
    fit = m.fit(train_text,
                train_lbls,
                epochs=5,
                validation_data=(test_text,
                                 test_lbls))  # Pass callback to training

    # evaluate = m.evaluate(test_text, test_lbls)

    m.save('training/model.h5')

    print('Training complete:')
    print(fit.history)
    print("Accuracy: {}".format(fit.history['val_accuracy'][-1]))
    print("Loss: {}".format(fit.history['val_loss'][-1]))
Example #15
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('expert_policy_file', type=str)
    parser.add_argument('envname', type=str)
    args = parser.parse_args()
    expert_policy = args.expert_policy_file
    expert_behavior = 'data/cloning/experts/{}.p'.format(args.envname)
    print('loading and building expert policy')
    expert_model = load_policy.load_policy(expert_policy)
    print('loaded and built')

    obs_shape, action_shape = parse_expert.model_shapes(expert_policy)

    learning_model = init_model(obs_shape, action_shape)

    obs_mean, obs_sdev = parse_expert.standardizers(expert_policy)

    results = defaultdict(list)

    with tf.Session():
        tf_util.initialize()

        for i in range(50):
            print("iteration {}".format(i))
            returns, observations, expert_actions = gather_observations(
                learning_model, expert_model, args.envname, obs_mean, obs_sdev)
            print('')
            print("n resets: {}".format(len(returns)))
            print("score: {} *-* {} *-* {}".format(
                np.mean(returns) - np.std(returns), np.mean(returns),
                np.mean(returns) + np.std(returns)))
            results['dagger_epoch'].append(returns)
            obs_std = normalize(observations, obs_mean, obs_sdev)
            expert_actions = expert_actions.reshape(-1, action_shape)
            learning_model.fit(obs_std,
                               expert_actions,
                               epochs=10,
                               batch_size=16)

        results['expert'] = parse_expert.returns(expert_behavior)
        pickle.dump(results,
                    open("results/dagger/{}.p".format(args.envname), "wb"))
Example #16
def eval(args):
    model = init_model()
    state_dict = torch.load(args.weights, map_location=args.device)
    model.load_state_dict(state_dict)
    model.to(args.device)
    model.eval()
    transform_test = Compose([
        ToTensor(),
        Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])

    test_dataset = Imagenette(DATAPATH,
                              mode='val',
                              size=IMG_SIZE,
                              transform=transform_test)

    test_loader = torch.utils.data.DataLoader(test_dataset,
                                              batch_size=BATCH_SIZE,
                                              shuffle=False,
                                              num_workers=NUM_WORKERS,
                                              pin_memory=True)

    all_output = []
    all_gt = []
    with torch.no_grad():
        for img, labels in tqdm(test_loader):
            img = img.to(args.device).float()
            labels = labels.to(args.device)
            output = model(img)
            all_gt.extend(labels.max(1)[1].cpu().numpy().tolist())
            all_output.extend(output.max(1)[1].cpu().numpy().tolist())
    # report = classification_report(all_gt, all_output)
    accuracy = accuracy_score(all_gt, all_output)

    # print(report)
    print("\nTest Accuracy {:.4f}".format(accuracy))
Example #17
#!/usr/bin/env python
import site
site.addsitedir('/srv/www/domains/mhjones.org')
site.addsitedir('/srv/www/domains/mhjones.org/virtualenv/lib/python2.6/site-packages')

from tornado.options import logging
import wsgiref.handlers

from sqlalchemy import create_engine
import tornado.wsgi

import config
from model import init_model
import handlers

log = logging.getLogger('app.wsgi')

application = tornado.wsgi.WSGIApplication(handlers.app_urls.urls, **config.http_params)

read_conn = create_engine(config.engine_url, **config.engine_params)
init_model(read_conn)

if __name__ == "__main__":
    wsgiref.handlers.CGIHandler().run(application)
Example #18
def test_users_model_api_database():
    sys.path.insert(0, os.getcwd()+'/examples/user/database-model')
    try: 
        from authkit.users.sqlalchemy_driver import UsersFromDatabase
    except ImportError:
        raise Exception("Could not run the SQLAlchemy tests, not installed")
    if os.path.exists("test.db"):
        os.remove("test.db")
    import model as test_model

    # Setup SQLAlchemy database engine
    from sqlalchemy import engine_from_config
    engine = engine_from_config({'sqlalchemy.url':'sqlite:///test.db'}, 'sqlalchemy.')
    test_model.init_model(engine)
    test_model.engine = engine
 
    d = UsersFromDatabase(test_model)
    
    test_model.meta.metadata.create_all(test_model.engine)
    
    d.role_create("wiki")
    d.role_create("adMin")
    d.role_create("editor")
    d.group_create("pyLOns")
    d.group_create("dJAngo")
    d.user_create("jaMEs", "passWOrd1", "pyLoNs")
    d.user_create("ben", "password2")
    d.user_create("Simon", "password3")
    d.user_create("ian", "paSsword4")
    assertEqual(d.list_roles(),["admin", "editor", "wiki"])
    assertEqual(d.list_groups(),["django", "pylons"])
    assertEqual(d.list_users(),['ben', 'ian', 'james', 'simon'])
    assertEqual(d.user_has_password("james", "passWOrd1"), True)
    assertEqual(d.user_has_password("james", "password1"), False)
    
    d.role_create("test_role")
    d.group_create("test_group")
    d.user_create("test_user", "password")
    assertEqual(d.list_roles(),["admin", "editor", "test_role", "wiki"])
    assertEqual(d.list_groups(),["django", "pylons", "test_group"])
    assertEqual(d.list_users(),['ben', 'ian', 'james', 'simon', "test_user"])
    d.role_delete("test_role")
    d.group_delete("test_group")
    d.user_delete("test_user")
    assertEqual(d.list_roles(),["admin", "editor", "wiki"])
    assertEqual(d.list_groups(),["django", "pylons"])
    assertEqual(d.list_users(),['ben', 'ian', 'james', 'simon'])

    assertEqual(d.user_has_role("james", "admin"), False)
    d.user_add_role("james", "admin")
    assertEqual(d.user_has_role("james", "admin"), True)
    d.user_remove_role("james", "admin")
    assertEqual(d.user_has_role("james", "admin"), False)

    d.user_add_role("james", "wiki")
    d.user_add_role("simon", "wiki")
    d.user_add_role("james", "admin")
    #d.user_add_role("james", "editor")
    d.user_add_role("ben", "editor")
    
    assertEqual(d.user_has_group("james", "pylons"), True)
    assertEqual(d.user_has_group("simon", None), True)
    assertEqual(d.user_has_group("simon", "django"), False)
    d.user_set_group("simon", "dJangO")
    assertEqual(d.user_has_group("simon", None), False)
    d.user_set_group("bEn", "PyLONS")
    assertEqual(d.user_has_group("simon", "django"), True)
    assertEqual(d.user_has_group("bEn", "pYlons"), True)
    d.user_remove_group("bEn")
    assertEqual(d.user_has_group("bEn", "pYlons"), False)
    d.user_set_group("bEn", "PyLONS")
    assertEqual(d.user_has_group("bEn", "pYlons"), True)
    
    assertEqual(d.list_users(),['ben', 'ian', 'james', 'simon'])
    d.user_set_username("james", "jim")
    assertEqual(d.list_users(),['ben', 'ian', 'jim', 'simon'])
    d.user_set_username("jim", "james")
    
    from authkit.users import UsersFromFile, UsersFromString, AuthKitNoSuchUserError, AuthKitNoSuchGroupError,AuthKitNoSuchRoleError
    string_data = """jaMEs:passWOrd1:pyLOns wiki adMin
    ben:password2:pylons admin editor
    simon:password3:dJAngo
    ian:paSsword4 wiki
    """
    filename = 'test/user_file_data.txt'
    
    s = UsersFromString(string_data)
    f = UsersFromFile(filename)

    # Test Parsing
    assertAllEqual(
        s.passwords,
        f.passwords,
        {
            'james':'passWOrd1',
            'ben':'password2',
            'simon':'password3',
            'ian':'paSsword4',
        },
    )
    assertAllEqual(
        s.roles, 
        f.roles,
        {
            'james':['admin', 'wiki'],
            'ben':['admin','editor'],
            'ian':['wiki'],
            'simon':[],
        },
    )
    assertAllEqual(
        s.groups, 
        f.groups,
        {
            'james':'pylons',
            'ben':'pylons',
            'ian': None,
            'simon':'django',
        },
    )
    assertAllEqual(
        s.usernames, 
        f.usernames,
        ['ben', 'ian', 'james', 'simon'],
    )

    # Test list functions
    assertAllEqual(
        s.list_users(),
        f.list_users(),
        d.list_users(),
        ['ben', 'ian', 'james', 'simon'],
    )
    assertAllEqual(
        s.list_roles(), 
        f.list_roles(),
        d.list_roles(),
        ['admin', 'editor', 'wiki'],
    )
    assertAllEqual(
        s.list_groups(), 
        f.list_groups(),
        d.list_groups(),
        ['django','pylons'],
    )

    # Test user has functions
    assertAllEqual(
        s.user_has_role('jAMes','WiKi'), 
        f.user_has_role('jAMes','WiKi'), 
        d.user_has_role('jAMes','WiKi'), 
        True
    )
    assertAllEqual(
        s.user_has_role('jAMes','editOr'), 
        f.user_has_role('jAMes','editOr'), 
        d.user_has_role('jAMes','editOr'), 
        False
    )
    
    assertAllEqual(
        s.user_has_group('jAMeS','PyLons'), 
        f.user_has_group('jAMes','pylOns'), 
        d.user_has_group('jAMes','pylOns'), 
        True
    )
    assertAllEqual(
        s.user_has_group('jameS','djaNgo'), 
        f.user_has_group('JAMes','djAngo'), 
        d.user_has_group('JAMes','djAngo'), 
        False
    )

    assertAllEqual(
        s.user_has_password('jAMeS','passWOrd1'), 
        f.user_has_password('jAMes','passWOrd1'), 
        d.user_has_password('jAMes','passWOrd1'), 
        True
    )
    assertAllEqual(
        s.user_has_password('jameS','PASSWORD1'), 
        f.user_has_password('JAMes','PASSWORD1'), 
        d.user_has_password('JAMes','PASSWORD1'), 
        False
    )

    # Existence Methods
    assertAllEqual(
        s.user_exists('jAMeS'), 
        f.user_exists('jAMes'), 
        d.user_exists('jAMes'), 
        True
    )
    assertAllEqual(
        s.user_exists('nobody'), 
        f.user_exists('nobody'), 
        d.user_exists('nobody'), 
        False
    )
    
    # Existence Methods
    assertAllEqual(
        s.role_exists('wiKi'), 
        f.role_exists('Wiki'), 
        d.role_exists('Wiki'), 
        True
    )
    assertAllEqual(
        s.role_exists('norole'), 
        f.role_exists('norole'), 
        d.role_exists('norole'), 
        False
    )
    
    assertAllEqual(
        s.group_exists('pyLons'), 
        f.group_exists('PYlons'), 
        d.group_exists('PYlons'), 
        True
    )
    assertAllEqual(
        s.group_exists('nogroup'), 
        f.group_exists('nogroup'), 
        d.group_exists('nogroup'), 
        False
    )


    # User Methods
    
    assertAllEqual(
        s.user('James'), 
        f.user('James'),
        d.user('James'),
        {
            'username': '******',
            'group':    'pylons',
            'password': '******',
            'roles':    ['admin','wiki'],
        }
    )
    
    # Test all user methods raise:
    for plugin in [s,f,d]:
        for func in [
            'user',
            'user_roles',
            'user_group',
            'user_password',
        ]:
            try:
                getattr(plugin, func)('nouser')
            except AuthKitNoSuchUserError, e:
                pass
            else:
                raise AssertionError("Failed to throw a no user error")
Example #19
logging.basicConfig(
    format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
    level=logging.INFO)
logger = logging.getLogger(__name__)

app = FastAPI()
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

class_dict, counters, label_to_name = init_model()


def predict(label_name):
    try:
        class_id = class_dict[label_name]
    except KeyError:
        return {}
    sorted_classes = sorted(enumerate(counters[class_id]),
                            reverse=False,
                            key=lambda x: x[1])
    sorted_classes = [x for x in sorted_classes if x[1] > 0]
    return [{
        "prediction": label_to_name[label],
        "confidence": probability
    } for label, probability in sorted_classes]
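
The excerpt sets up the FastAPI app and CORS middleware but the route definition falls outside the snippet; a minimal sketch of how predict might be exposed, assuming a simple GET endpoint (the path and function name are illustrative, not the original API):

@app.get('/predict/{label_name}')
def predict_endpoint(label_name: str):
    # Delegates to the predict() helper above; unknown labels yield an empty result.
    return predict(label_name)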
Example #20
    #parser.add_argument('model', type=str,
    #help='Path to model definition json. Model weights should be on the same path.')
    #args = parser.parse_args()
    #with open(args.model, 'r') as jfile:
    # NOTE: if you saved the file by calling json.dump(model.to_json(), ...)
    # then you will have to call:
    #
    #   model = model_from_json(json.loads(jfile.read()))\
    #
    # instead.
    #model = model_from_json(jfile.read())

    #model.compile("adam", "mse")
    #weights_file = args.model.replace('json', 'h5')
    #model.load_weights(weights_file)

    guided = True

    with tf.device('/cpu:0'):
        concept_model, discriminator, visual_generator, action_generator, generator_discriminator, discriminator_copy = model.init_model(
            (40, 80, 1))

    images = deque([])
    actions = deque([])

    # wrap Flask application with engineio's middleware
    app = socketio.Middleware(sio, app)

    # deploy as an eventlet WSGI server
    eventlet.wsgi.server(eventlet.listen(('', 4567)), app)
Example #21
    ReplyKeyboardMarkup, KeyboardButton, \
    InlineKeyboardMarkup, InlineKeyboardButton
from aiofiles import os as aio_os
from torchvision import transforms
from model import init_model, predict_image

N_BEST = 5
best_recipes = dict()
TYPE = 'image'

# bot = Bot(token=os.environ.get('TOKEN', None),
TOKEN = 'TOKEN'
bot = Bot(token=TOKEN)
dp = Dispatcher(bot, storage=MemoryStorage())

init_model()
logging.info("Model was init")
logging.basicConfig(
    filename='log.txt',
    filemode='a',
    format='%(asctime)s, %(msecs) d %(name)s %(levelname) s %(message) s',
    datefmt='%H:%M:%S',
    level=logging.INFO)

inline_keyboard_markup = types.InlineKeyboardMarkup()
inline_keyboard_markup.add(
    types.InlineKeyboardButton('Применить стиль Сезанна',  # "Apply Cézanne style"
                               callback_data='sezanne'))


class TestStates(Helper):
Example #22
import model
from authkit.users.sqlalchemy_driver import UsersFromDatabase

# Setup SQLAlchemy database engine
from sqlalchemy import engine_from_config

engine = engine_from_config({"sqlalchemy.url": "sqlite:///test.db"}, "sqlalchemy.")
model.init_model(engine)
model.engine = engine

users = UsersFromDatabase(model)
model.meta.metadata.create_all(model.engine)
users.group_create("pylons")
users.role_create("admin")
users.user_create("james", password="******", group="pylons")
users.user_create("ben", password="******")
users.user_add_role("ben", role="admin")

# Commit the changes
model.meta.Session.commit()
model.meta.Session.remove()
Example #23
import numpy as np
import model

Wxh, Whh, Why, bh, by = model.init_model()  #model parameters


def Loss(inputs, targets, hprev):
    x, h, y, p = {}, {}, {}, {}  #Empty dicts
    loss = 0.0

    h[-1] = np.copy(hprev)
    #forward pass
    for t in range(len(inputs)):
        x[t] = np.zeros(
            (1, data.vocab_size))  # for one-hot-row vector representation
        x[t][0][inputs[
            t]] = 1  # placing the t-th input in one-hot-row vector representation
        h[t] = np.tanh(np.dot(x[t], Wxh) + np.dot(h[t - 1], Whh) +
                       bh)  # hidden state
        y[t] = np.dot(h[t], Why) + by  #output
        p[t] = np.exp(y[t]) / np.sum(np.exp(
            y[t]))  # probabilities for output chars
        loss += -np.log(p[t][0][targets[t]])  # softmax loss

    # backward pass
    dWxh, dWhh, dWy = np.zeros_like(Wxh), np.zeros_like(Whh), np.zeros_like(
        Why)
    dbh, dby = np.zeros_like(bh), np.zeros_like(by)
    dhprev = np.zeros_like(h[0])

    for t in reversed(range(len(inputs))):
Example #24
def main():
    parser = create_parser()
    args = parser.parse_args()

    if args.setup:
        create_directories()

    if args.debug:
        dataset = DATASETS['debug']
        args.dataset = "debug"
        features, _, labels, _ = preprocess_data(args.patch_size,
                                                 args.distribution,
                                                 dataset=dataset)
        #print(features, 'debug')
        #print("length of features: ",type(features), len(features),'element.shape: ',features[0][0])
        features_train, features_test = features[:100], features[100:120]
        labels_train, labels_test = labels[:100], labels[100:120]
    elif args.train_model or args.evaluate_model or args.preprocess_data:
        dataset = DATASETS[args.dataset]
        #print(dataset.values())
        load_from_cache = not args.preprocess_data
        try:
            features_train, features_test, labels_train, labels_test = preprocess_data(
                args.patch_size,
                args.distribution,
                dataset=dataset,
                only_cache=load_from_cache)
            #print(features_train, 'train_model or evaluate_model or preprocess_data')
            print("Length of features_train: ", len(features_train))
        except IOError:
            print("Cache file does not exist. Please run again with -p flag.")
            sys.exit(1)

        if args.visualise:
            visualise_labels(labels_train, args.patch_size, LABELS_DIR)
            visualise_labels(labels_test, args.patch_size, LABELS_DIR)

    if not args.model_id:
        timestamp = time.strftime("%d_%m_%Y_%H%M")
        model_id = "{}_{}_{}".format(timestamp, args.dataset,
                                     args.architecture)
    else:
        model_id = args.model_id

    if args.init_model or args.train_model or args.evaluate_model:
        model_dir = os.path.join(OUTPUT_DIR, model_id)
        save_makedirs(model_dir)

    # Hyperparameters for the model. Since there are so many of them it is
    # more convenient to set them in the source code as opposed to passing
    # them as arguments to the Command Line Interface. We use a list of tuples instead of a
    # dict since we want to print the hyperparameters and for that purpose
    # keep them in the predefined order.
    hyperparameters = [
        ("architecture", args.architecture),
        # Hyperparameters for the first convolutional layer.
        ("nb_filters_1", 64),
        ("filter_size_1", 9),
        ("stride_1", (2, 2)),
        # Hyperparameter for the first pooling layer.
        ("pool_size_1", (2, 2)),
        # Hyperparameter for the second convolutional layer (when
        # two layer architecture is used).
        ("nb_filters_2", 128),
        ("filter_size_2", 5),
        ("stride_2", (1, 1)),
        # Hyperparameters for Stochastic Gradient Descent.
        ("learning_rate", 0.05),
        ("momentum", 0.9),
        ("decay", 0.0)
    ]

    hyperparameters_mnih = [
        ("architecture", args.architecture),
        # Hyperparameters for the first convolutional layer.
        ("nb_filters_1", 64),
        ("filter_size_1", 16),
        ("stride_1", (4, 4)),
        # Hyperparameter for the first pooling layer.
        ("pool_size_1", (2, 2)),
        ("pool_stride", 1),
        # Hyperparameter for the second convolutional layer).
        ("nb_filters_2", 112),
        ("filter_size_2", 4),
        ("stride_2", (1, 1)),
        # Hyperparameter for the third convolutional layer).
        ("nb_filters_3", 80),
        ("filter_size_3", 3),
        ("stride_3", (1, 1)),

        # Hyperparameters for Stochastic Gradient Descent.
        ("learning_rate", 0.05),
        ("momentum", 0.9),
        ("decay", 0.0)
    ]

    if args.init_model:
        model = init_model(args.patch_size, model_id,
                           **dict(hyperparameters_mnih))
        save_model_summary(hyperparameters_mnih, model, model_dir)
    elif args.train_model or args.evaluate_model:
        hyperparameters = dict(hyperparameters_mnih)
        model = load_model(model_id)
        model = compile_model(model, hyperparameters["learning_rate"],
                              hyperparameters['momentum'],
                              hyperparameters["decay"])

    if args.train_model:
        model = train_model(model,
                            features_train,
                            labels_train,
                            args.patch_size,
                            model_id,
                            model_dir,
                            nb_epoch=args.epochs,
                            checkpoints=args.checkpoints,
                            tensorboard=args.tensorboard,
                            earlystop=args.earlystop)

    if args.evaluate_model:
        evaluate_model(model,
                       features_test,
                       labels_test,
                       args.patch_size,
                       model_dir,
                       out_format=args.out_format)
Example #25
def updated_training_loop():

    gen_iterations = 0
    if opt.oldmodelD:
        D = net_d()
        init_model(D, 'normal')
    else:
        D = SRGAN_D(6, 1)
        init_model(D, 'normal')
    if opt.CGAND:
        D = CGAN_D(opt)
        init_model(D, 'normal')
    if opt.wasserstein:
        D = DCGAN_D(256, 6, 32, 1)
    if opt.netg_dropout:
        G = net_g_dropout()
    else:
        G = net_g()
    #init_model(G, 'normal')
    #
    D.train()
    G.train()
    losses = []
    start = time.time()
    EPS = opt.eps
    G.cuda()
    D.cuda()
    beta_1 = 0.5
    if opt.loss == 'mse':
        criterionGAN = nn.MSELoss()  #GANLoss()
    elif opt.loss == 'ganloss':
        criterionGAN = GANLoss()

    criterionL1 = torch.nn.L1Loss()
    if opt.wasserstein:
        optimizer_D = torch.optim.RMSprop(D.parameters(), lr=opt.lrD)
        optimizer_G = torch.optim.RMSprop(G.parameters(), lr=opt.lrG)
    else:
        optimizer_G = torch.optim.Adam(G.parameters(),
                                       lr=opt.lrG,
                                       betas=(opt.beta_1, 0.999))
        optimizer_D = torch.optim.Adam(D.parameters(),
                                       lr=opt.lrD,
                                       betas=(opt.beta_1, 0.999))

    total_epochs = opt.total_epochs
    for epoch in range(total_epochs):
        #print(epoch)
        experiment.log_current_epoch(epoch)

        data_iter = iter(dataloader)
        i = 0
        while i < len(dataloader):

            ############################
            # (1) Update D network
            ###########################
            for p in D.parameters():  # reset requires_grad
                p.requires_grad = True  # they are set to False below in netG update

            # train the discriminator Diters times
            if epoch < opt.D_pretrain or (epoch + 1) % 50 == 0:
                Diters = 100
            else:
                Diters = opt.Diters
            j = 0
            #Diters = 5
            while j < Diters and i < len(dataloader):
                j += 1

                lr, hr = data_iter.next()
                i += 1

                # Drop the last batch if it's not the same size as the batchsize
                if lr.size(0) != opt.batch_size:
                    break
                lr = lr.cuda()
                hr = hr.cuda()

                # train with real

                D.zero_grad()

                padding = torch.zeros(opt.batch_size, 3, 192, 64).cuda()
                padding2 = torch.zeros(opt.batch_size, 3, 256, 192).cuda()
                real_lr = torch.cat((torch.cat((lr, padding), 2), padding2), 3)

                inputy = Variable(torch.cat((real_lr, hr), 1))
                #inputy = Variable(hr)

                errD_real = D(inputy, True)  # can modify to feed inputv too

                padding = torch.zeros(opt.batch_size, 3, 192, 64).cuda()
                padding2 = torch.zeros(opt.batch_size, 3, 256, 192).cuda()
                real_lr = torch.cat((torch.cat((lr, padding), 2), padding2), 3)

                # completely freeze netG while we train the discriminator
                inputg = Variable(lr, volatile=True)
                #fake = Variable(netG(inputg).data)
                fake = Variable(torch.cat((real_lr, G(inputg).data), 1))
                errD_fake = D(fake, True)

                # calculate discriminator loss and backprop
                # errD = 0.5 * (torch.mean((errD_real - 1)**2) + torch.mean(errD_fake**2))
                if opt.wasserstein:
                    errD = -torch.mean(errD_real) + torch.mean(errD_fake)
                else:
                    errD = -torch.mean(
                        torch.log(errD_real + EPS) +
                        torch.log(1 - errD_fake + EPS))

                errD.backward()

                optimizer_D.step()
                if opt.wasserstein:
                    for p in D.parameters():
                        p.data.clamp_(-opt.clamp, opt.clamp)

    #         print("here")
    ############################
    # (2) Update G network
    ###########################
            for p in D.parameters():
                p.requires_grad = False  # to avoid computation
            G.zero_grad()

            lr = lr.cuda()
            hr = hr.cuda()
            if lr.size(0) != opt.batch_size:
                break
            input_lr = Variable(lr)
            input_hr = Variable(hr)

            padding = torch.zeros(opt.batch_size, 3, 192, 64).cuda()
            padding2 = torch.zeros(opt.batch_size, 3, 256, 192).cuda()
            #         print(lr.size())
            #         print(padding.size())
            #         print(padding2.size())
            real_lr = torch.cat((torch.cat((lr, padding), 2), padding2), 3)

            fake = G(input_lr)

            #errG_1 = D(Variable(torch.cat((real_lr, fake.data), 1),requires_grad=True))
            errG_1 = D(torch.cat((Variable(real_lr), fake), 1))
            # maximize log(D) instead of minimize log(1 - D)
            if opt.wasserstein:
                errG = -torch.mean(errG_1)
            else:
                errG = -torch.mean(torch.log(errG_1 + EPS))
            # generator accumulates loss from discriminator + MSE with true image
            # loss_MSE = MSE(fake, input_hr)
            # loss_SSIM = ssim_loss(fake, input_hr)
            # loss_G = 0.25*errG + loss_MSE - loss_SSIM
            loss_G = errG
            #loss_G = criterionL1(fake, input_hr)
            loss_G.backward()
            losses.append(loss_G.data)
            optimizer_G.step()
            gen_iterations += 1

        if epoch > opt.D_pretrain and (epoch + 1) % 1 == 0:
            print_loss_avg = np.mean(losses).cpu().numpy()[0]
            experiment.log_metric("average_g_loss", 100 * print_loss_avg)
            experiment.log_metric("actual_g_loss",
                                  losses[-1].cpu().numpy()[0] * 100)
            print('%s (%d %d%%) %.4f' %
                  (timeSince(start, (epoch + 1) / total_epochs), epoch,
                   epoch / total_epochs * 100, print_loss_avg))

        if (epoch + 1) % 100 == 0:
            print('saving model at epoch', epoch)
            torch.save(
                G.state_dict(),
                "checkpoints/G_{0}_{1}.pth".format(opt.output_path, epoch))
            torch.save(
                D.state_dict(),
                "checkpoints/D_{0}_{1}.pth".format(opt.output_path, epoch))
Example #26
def train(opt_path):
    ## initialization
    log_saveroot = '../logs'
    opt = utils.read_option(opt_path, log_saveroot)
    if opt['use_gpu']:
        device = 'cuda'
        os.environ['CUDA_VISIBLE_DEVICES'] = opt['gpu_id']
    else:
        device = 'cpu'
    if opt['fixed_random']:
        utils.set_random_seed()
    opt_train = opt['train']
    opt_data = opt['dataset']
    if opt_data['type'] == 1:
        out_nc = 20
    elif opt_data['type'] == 2:
        out_nc = 100
    pprint.pprint(opt)

    ## define dataset
    train_dataset = dataset.Q1Dataset_pic(opt_data=opt_data,
                                          phase='train',
                                          root=None)
    val_dataset = dataset.Q1Dataset_pic(opt_data=opt_data,
                                        phase='val',
                                        root=None)
    train_dataloader = DataLoader(train_dataset,
                                  batch_size=opt_train['batch_size'],
                                  num_workers=opt_train['num_workers'],
                                  shuffle=True)
    val_dataloader = DataLoader(val_dataset,
                                batch_size=opt_train['batch_size'],
                                num_workers=opt_train['num_workers'],
                                shuffle=False)

    ## define network, optimizer, scheduler, criterion
    net_m = model.init_model(opt_train, out_nc).to(device)
    net_m2 = model.init_model(opt_train, out_nc).to(device)
    net_m.model = nn.DataParallel(net_m.model)
    net_m2.model = nn.DataParallel(net_m2.model)
    optimizer = optim.init_optim(opt_train, net_m)
    optimizer2 = optim.init_optim(opt_train, net_m2)
    scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer,
                                                     opt_train['milestones'],
                                                     gamma=0.2)
    scheduler2 = torch.optim.lr_scheduler.MultiStepLR(optimizer2,
                                                      opt_train['milestones'],
                                                      gamma=0.2)
    if opt['loss'] == "CEloss":
        criterion = nn.CrossEntropyLoss()
    else:
        criterion = nn.CrossEntropyLoss()

    ## define tensorboard
    path_log = opt['save_root']
    all_scalars = {}
    writer = utils.create_writer(path_log)

    ## train
    with tqdm(total=opt_train['epoch'] * len(train_dataloader)) as pbar:
        for epoch in range(opt_train['epoch']):
            train_loss = 0.0
            train_accu = 0.0
            total = 0
            total_right = 0
            net_m.train()
            net_m2.train()
            scheduler.step()
            scheduler2.step()

            for img, label in train_dataloader:
                optimizer.zero_grad()
                optimizer2.zero_grad()
                img = img.to(device)
                label = label.to(device)
                total += label.shape[0]

                if opt_train['model'] == 'ResNext':
                    att_outputs, out, _ = net_m(img)
                    att_loss = criterion(att_outputs, label)
                    per_loss = criterion(out, label)
                    # loss = att_loss + per_loss
                    att_outputs2, out2, _ = net_m2(img)
                    att_loss2 = criterion(att_outputs2, label)
                    per_loss2 = criterion(out2, label)
                    loss = att_loss + per_loss + att_loss2 + per_loss2
                else:
                    out = net_m(img)
                    out2 = net_m2(img)
                    loss = criterion(out, label) + criterion(out2, label)
                loss.backward()
                if opt_train['optim'] == 'SAM':
                    optimizer.first_step(zero_grad=True)
                    criterion(net_m(img), label).backward()
                    optimizer.second_step(zero_grad=True)
                    optimizer2.first_step(zero_grad=True)
                    criterion(net_m2(img), label).backward()
                    optimizer2.second_step(zero_grad=True)
                else:
                    optimizer.step()
                    optimizer2.step()
                train_loss += loss.item()

                _, pred = (out + out2).max(1)
                total_right += (pred == label).sum().item()
                pbar.update(1)

            train_accu = total_right / total
            writer, all_scalars = utils.add_scalar(path_log, all_scalars,
                                                   writer, 'train/loss',
                                                   train_loss, epoch)
            writer, all_scalars = utils.add_scalar(path_log, all_scalars,
                                                   writer, 'train/accu',
                                                   train_accu, epoch)
            tqdm.write("Epoch: %d, loss: %.6f, accu: %.6f" %
                       ((epoch + 1), train_loss, train_accu))

            ## val
            if ((epoch + 1) % opt_train['val_interval'] == 0):
                net_m.eval()
                net_m2.eval()

                val_total = 0
                val_total_right = 0
                for img, label in val_dataloader:
                    img = img.to(device)
                    label = label.to(device)
                    val_total += label.shape[0]

                    with torch.no_grad():
                        if opt_train['model'] == 'ResNext':
                            _, out, _ = net_m(img)
                            _, out2, _ = net_m2(img)
                        else:
                            out = net_m(img)
                            out2 = net_m2(img)
                        _, pred = (out + out2).max(1)
                        val_total_right += (pred == label).sum().item()

                val_accu = val_total_right / val_total
                writer, all_scalars = utils.add_scalar(path_log, all_scalars,
                                                       writer, 'val/accu',
                                                       val_accu, epoch)
                tqdm.write("VAL Epoch: %d, VAL accu: %.6f" %
                           ((epoch + 1), val_accu))

            ## save model
            if ((epoch + 1) % opt_train['save_interval'] == 0):
                save_model_path = os.path.join(
                    opt['save_root'] + '/checkpoints',
                    'model_%05d.pth' % (epoch + 1))
                net_m.save(save_model_path)
                save_model_path = os.path.join(
                    opt['save_root'] + '/checkpoints',
                    'model2_%05d.pth' % (epoch + 1))
                net_m2.save(save_model_path)
Example #27
File: server.py Project: qbaoma/web
''' write for using webservice '''
from Control.main import externalWebservice, OfficialWebservice

''' All server Initialization''' 
from model import init_model
from model.__cmdbinit__ import cmdbinit_model
from model.initData import initdata
from model.cminitData import cminitdata

from BaseClass.logger import LoggerRecord
logger = LoggerRecord().initlog()
        
if __name__ == '__main__':

    init_model() # init all tables 
    cmdbinit_model() # init cmdb all tables 
    initdata()   # init all data  
    cminitdata() # init cmdb all data
    
    # starting tcpserver
    factory = Factory()

    # starting webservice
    soap_app=soaplib.core.Application([externalWebservice, OfficialWebservice], 'tns')
    wsgi_app=wsgi.Application(soap_app)
    
    # outside Statement
    logger.debug('* Webservice started on port:7789... ')
    logger.debug('* wsdl AT: http://192.168.82.89:7789/SOAP/?wsdl')
    logger.debug('* wsdl AT: http://222.73.33.131:7789/SOAP/?wsdl')
Example #28
    # model_idx = options.model
    batch_size = options.batch_size
    num_epoch = options.epochs
    data_root = options.data_root
    input_size = options.input_size

    train_csv_path = data_root + "train.csv"
    test_csv_path = data_root + "test.csv"
    images_dir = data_root + "images/"
    submission_df_path = data_root + "sample_submission.csv"

    num_classes = 4
    num_cv_folds = 5

    device = get_device()
    model, _ = init_model(num_classes, use_pretrained=options.pre_train)

    feature_center = torch.zeros(4, 32 * model.num_features).to(device)
    criterion = get_loss_fn()

    optimizer = torch.optim.SGD(model.parameters(),
                                lr=0.001,
                                momentum=0.9,
                                weight_decay=1e-5)
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer,
                                                step_size=2,
                                                gamma=0.95)

    tr_df_all = pd.read_csv(train_csv_path)
    tr_df, val_df = train_test_split(tr_df_all, test_size=0.2)
    val_df = val_df.reset_index(drop=True)
Example #29
    img = (img + 1) / 2.

    if filename:
        cv2.imwrite(filename, img*255)
        
    cv2.imshow(window, img)
    key = cv2.waitKey(1)
            
BATCH_SIZE = 1
FRAMES_PER_CONCEPT = 4
BATCH_COUNT = 100000

with tf.device('/gpu:0'):

    (generator, discriminator, 
     generator_discriminator) = model.init_model((20, 80, 1), FRAMES_PER_CONCEPT)
    
    generator = sensorimotor_generator(generator, scale_factor=.25,
                                       batch_size=BATCH_SIZE,
                                       frames_per_concept=FRAMES_PER_CONCEPT)

    for i in range(BATCH_COUNT):
        
        (real_images, real_actions, 
         fake_images, fake_actions) = next(generator)
        
        d_loss = discriminator.train_on_batch([real_images, real_actions], np.array([1,] * BATCH_SIZE))
        print("Batch", i, "d_loss_real_real=", d_loss)
        
        d_loss = discriminator.train_on_batch([fake_images, fake_actions], np.array([0,] * BATCH_SIZE))
        print("Batch", i, "d_loss_fake_fake=", d_loss)
Example #30
import datetime
import sys
import traceback
import logging

from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
# the sqlite DB-API module, init_model and UserClient are assumed to be imported elsewhere in the original file

e = create_engine(
    'sqlite+pysqlite:///proverbaro.db',
    module=sqlite,
    encoding="utf-8",
)

logger = logging.getLogger(__name__)

Base = declarative_base()

repository = init_model(Base)


class TwitterPublisher(object):
    def __init__(
            self, consumer_key, consumer_secret, access_token,
            access_token_key, hashtag):
        self.consumer_key = consumer_key
        self.consumer_secret = consumer_secret
        self.access_token = access_token
        self.access_token_key = access_token_key
        self.hashtag = hashtag

    def post_tweet(self, proverb):
        client = UserClient(self.consumer_key, self.consumer_secret,
                            self.access_token, self.access_token_key)
Example #31
def test_users_model_api_database():
    sys.path.insert(0, os.getcwd() + "/examples/user/database-model")
    try:
        from authkit.users.sqlalchemy_driver import UsersFromDatabase
    except ImportError:
        raise Exception("Could not run the SQLAlchemy tests, not installed")
    if os.path.exists("test.db"):
        os.remove("test.db")
    import model as test_model

    # Setup SQLAlchemy database engine
    from sqlalchemy import engine_from_config

    engine = engine_from_config({"sqlalchemy.url": "sqlite:///test.db"}, "sqlalchemy.")
    test_model.init_model(engine)
    test_model.engine = engine

    d = UsersFromDatabase(test_model)

    test_model.meta.metadata.create_all(test_model.engine)

    d.role_create("wiki")
    d.role_create("adMin")
    d.role_create("editor")
    d.group_create("pyLOns")
    d.group_create("dJAngo")
    d.user_create("jaMEs", "passWOrd1", "pyLoNs")
    d.user_create("ben", "password2")
    d.user_create("Simon", "password3")
    d.user_create("ian", "paSsword4")
    assertEqual(d.list_roles(), ["admin", "editor", "wiki"])
    assertEqual(d.list_groups(), ["django", "pylons"])
    assertEqual(d.list_users(), ["ben", "ian", "james", "simon"])
    assertEqual(d.user_has_password("james", "passWOrd1"), True)
    assertEqual(d.user_has_password("james", "password1"), False)

    d.role_create("test_role")
    d.group_create("test_group")
    d.user_create("test_user", "password")
    assertEqual(d.list_roles(), ["admin", "editor", "test_role", "wiki"])
    assertEqual(d.list_groups(), ["django", "pylons", "test_group"])
    assertEqual(d.list_users(), ["ben", "ian", "james", "simon", "test_user"])
    d.role_delete("test_role")
    d.group_delete("test_group")
    d.user_delete("test_user")
    assertEqual(d.list_roles(), ["admin", "editor", "wiki"])
    assertEqual(d.list_groups(), ["django", "pylons"])
    assertEqual(d.list_users(), ["ben", "ian", "james", "simon"])

    assertEqual(d.user_has_role("james", "admin"), False)
    d.user_add_role("james", "admin")
    assertEqual(d.user_has_role("james", "admin"), True)
    d.user_remove_role("james", "admin")
    assertEqual(d.user_has_role("james", "admin"), False)

    d.user_add_role("james", "wiki")
    d.user_add_role("simon", "wiki")
    d.user_add_role("james", "admin")
    # d.user_add_role("james", "editor")
    d.user_add_role("ben", "editor")

    assertEqual(d.user_has_group("james", "pylons"), True)
    assertEqual(d.user_has_group("simon", None), True)
    assertEqual(d.user_has_group("simon", "django"), False)
    d.user_set_group("simon", "dJangO")
    assertEqual(d.user_has_group("simon", None), False)
    d.user_set_group("bEn", "PyLONS")
    assertEqual(d.user_has_group("simon", "django"), True)
    assertEqual(d.user_has_group("bEn", "pYlons"), True)
    d.user_remove_group("bEn")
    assertEqual(d.user_has_group("bEn", "pYlons"), False)
    d.user_set_group("bEn", "PyLONS")
    assertEqual(d.user_has_group("bEn", "pYlons"), True)

    assertEqual(d.list_users(), ["ben", "ian", "james", "simon"])
    d.user_set_username("james", "jim")
    assertEqual(d.list_users(), ["ben", "ian", "jim", "simon"])
    d.user_set_username("jim", "james")

    from authkit.users import (
        UsersFromFile,
        UsersFromString,
        AuthKitNoSuchUserError,
        AuthKitNoSuchGroupError,
        AuthKitNoSuchRoleError,
    )

    string_data = """jaMEs:passWOrd1:pyLOns wiki adMin
    ben:password2:pylons admin editor
    simon:password3:dJAngo
    ian:paSsword4 wiki
    """
    filename = "test/user_file_data.txt"

    s = UsersFromString(string_data)
    f = UsersFromFile(filename)

    # Test Parsing
    assertAllEqual(
        s.passwords, f.passwords, {"james": "passWOrd1", "ben": "password2", "simon": "password3", "ian": "paSsword4"}
    )
    assertAllEqual(
        s.roles, f.roles, {"james": ["admin", "wiki"], "ben": ["admin", "editor"], "ian": ["wiki"], "simon": []}
    )
    assertAllEqual(s.groups, f.groups, {"james": "pylons", "ben": "pylons", "ian": None, "simon": "django"})
    assertAllEqual(s.usernames, f.usernames, ["ben", "ian", "james", "simon"])

    # Test list functions
    assertAllEqual(s.list_users(), f.list_users(), d.list_users(), ["ben", "ian", "james", "simon"])
    assertAllEqual(s.list_roles(), f.list_roles(), d.list_roles(), ["admin", "editor", "wiki"])
    assertAllEqual(s.list_groups(), f.list_groups(), d.list_groups(), ["django", "pylons"])

    # Test user has functions
    assertAllEqual(
        s.user_has_role("jAMes", "WiKi"), f.user_has_role("jAMes", "WiKi"), d.user_has_role("jAMes", "WiKi"), True
    )
    assertAllEqual(
        s.user_has_role("jAMes", "editOr"),
        f.user_has_role("jAMes", "editOr"),
        d.user_has_role("jAMes", "editOr"),
        False,
    )

    assertAllEqual(
        s.user_has_group("jAMeS", "PyLons"),
        f.user_has_group("jAMes", "pylOns"),
        d.user_has_group("jAMes", "pylOns"),
        True,
    )
    assertAllEqual(
        s.user_has_group("jameS", "djaNgo"),
        f.user_has_group("JAMes", "djAngo"),
        d.user_has_group("JAMes", "djAngo"),
        False,
    )

    assertAllEqual(
        s.user_has_password("jAMeS", "passWOrd1"),
        f.user_has_password("jAMes", "passWOrd1"),
        d.user_has_password("jAMes", "passWOrd1"),
        True,
    )
    assertAllEqual(
        s.user_has_password("jameS", "PASSWORD1"),
        f.user_has_password("JAMes", "PASSWORD1"),
        d.user_has_password("JAMes", "PASSWORD1"),
        False,
    )

    # Existence Methods
    assertAllEqual(s.user_exists("jAMeS"), f.user_exists("jAMes"), d.user_exists("jAMes"), True)
    assertAllEqual(s.user_exists("nobody"), f.user_exists("nobody"), d.user_exists("nobody"), False)

    # Existence Methods
    assertAllEqual(s.role_exists("wiKi"), f.role_exists("Wiki"), d.role_exists("Wiki"), True)
    assertAllEqual(s.role_exists("norole"), f.role_exists("norole"), d.role_exists("norole"), False)

    assertAllEqual(s.group_exists("pyLons"), f.group_exists("PYlons"), d.group_exists("PYlons"), True)
    assertAllEqual(s.group_exists("nogroup"), f.group_exists("nogroup"), d.group_exists("nogroup"), False)

    # User Methods

    assertAllEqual(
        s.user("James"),
        f.user("James"),
        d.user("James"),
        {"username": "******", "group": "pylons", "password": "******", "roles": ["admin", "wiki"]},
    )

    # Test all user methods raise:
    for plugin in [s, f, d]:
        for func in ["user", "user_roles", "user_group", "user_password"]:
            try:
                getattr(plugin, func)("nouser")
            except AuthKitNoSuchUserError:
                pass
            else:
                raise AssertionError("Failed to throw a no user error")
Exemplo n.º 32
0
 def create_db(self):
     init_model(self.engine)
Exemplo n.º 33
0
import model
import img
import sys
import os

c_model = model.init_model()

if len(sys.argv) > 1:
    path = sys.argv[1]
else:
    print('Usage: python3 test.py folder')
    sys.exit()

for filename in os.listdir(path):
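    # Decode each captcha image and rename the file to the recognised text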
    captcha = img.load_captcha_file(f'{path}/{filename}')
    char_array = img.cut_captcha(captcha)
    text = model.get_captcha_from_array(c_model, char_array)
    print(text)
    os.rename(f'{path}/{filename}', f'{path}/{text}.png')
Exemplo n.º 34
0
def setup_database():
    init_model(engine)
    teardownDatabase()
    elixir.setup_all(True)

    # Creating permissions

    see_site = Permission()
    see_site.permission_name = u'see-site'
    see_site.description = u'see-site permission description'
    DBSession.save(see_site)

    edit_site = Permission()
    edit_site.permission_name = u'edit-site'
    edit_site.description = u'edit-site permission description'
    DBSession.save(edit_site)

    commit = Permission()
    commit.permission_name = u'commit'
    commit.description = u'commit permission description'
    DBSession.save(commit)

    # Creating groups

    admins = Group()
    admins.group_name = u'admins'
    admins.display_name = u'Admins Group'
    admins.permissions.append(edit_site)
    DBSession.save(admins)

    developers = Group(group_name=u'developers',
                       display_name=u'Developers Group')
    developers.permissions = [commit, edit_site]
    DBSession.save(developers)

    trolls = Group(group_name=u'trolls', display_name=u'Trolls Group')
    trolls.permissions.append(see_site)
    DBSession.save(trolls)

    # Plus a couple of groups with no permissions
    php = Group(group_name=u'php', display_name=u'PHP Group')
    DBSession.save(php)

    python = Group(group_name=u'python', display_name=u'Python Group')
    DBSession.save(python)

    # Creating users

    user = User()
    user.user_name = u'rms'
    user.password = u'freedom'
    user.email_address = u'*****@*****.**'
    user.groups.append(admins)
    user.groups.append(developers)
    DBSession.save(user)

    user = User()
    user.user_name = u'linus'
    user.password = u'linux'
    user.email_address = u'*****@*****.**'
    user.groups.append(developers)
    DBSession.save(user)

    user = User()
    user.user_name = u'sballmer'
    user.password = u'developers'
    user.email_address = u'*****@*****.**'
    user.groups.append(trolls)
    DBSession.save(user)

    # Plus a couple of users without groups
    user = User()
    user.user_name = u'guido'
    user.password = u'phytonic'
    user.email_address = u'*****@*****.**'
    DBSession.save(user)

    user = User()
    user.user_name = u'rasmus'
    user.password = u'php'
    user.email_address = u'*****@*****.**'
    DBSession.save(user)

    DBSession.commit()
Exemplo n.º 35
0
 def __init__(self):
     self.model = init_model()
Exemplo n.º 36
0
    if filename:
        cv2.imwrite(filename, img * 255)

    cv2.imshow(window, img)
    key = cv2.waitKey(1)


BATCH_SIZE = 1
FRAMES_PER_CONCEPT = 5
BATCH_COUNT = 100000

with tf.device('/gpu:0'):
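    # Build the concept model, discriminator and generators on the GPU, then
    # draw real and generated sensorimotor batches to train the discriminator.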

    (concept_model, discriminator, visual_generator, action_generator,
     generator_discriminator) = model.init_model((20, 80, 1), BATCH_SIZE)

    generator = sensorimotor_generator(concept_model,
                                       visual_generator,
                                       action_generator,
                                       scale_factor=.25,
                                       batch_size=BATCH_SIZE,
                                       frames_per_concept=FRAMES_PER_CONCEPT)

    for i in range(BATCH_COUNT):

        (real_images, real_actions, fake_images,
         fake_actions) = next(generator)

        d_loss = discriminator.train_on_batch([real_images, real_actions],
                                              np.array([
Exemplo n.º 37
0
def train(best_score, start_epoch, max_epochs, learning_rate, batch_size,
          img_w, img_h, sgd, context):
    print("Loading dataset...", flush=True)
    dataset = load_dataset("data")
    split = int(len(dataset) * 0.9)
    training_set = dataset[:split]
    print("Training set: ", len(training_set))
    validation_set = dataset[split:]
    print("Validation set: ", len(validation_set))

    if os.path.isfile("model/global-wheat-yolo3-darknet53.params"):
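        # Resume from a previously saved checkpoint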
        model = load_model("model/global-wheat-yolo3-darknet53.params",
                           ctx=context)
    else:
        model = init_model(ctx=context)
    metrics = [
        gcv.utils.metrics.VOCMApMetric(iou_thresh=iou)
        for iou in [0.5, 0.55, 0.6, 0.65, 0.7, 0.75]
    ]

    print("Learning rate: ", learning_rate)
    if sgd:
        print("Optimizer: SGD")
        trainer = mx.gluon.Trainer(model.collect_params(), "SGD", {
            "learning_rate": learning_rate,
            "momentum": 0.5
        })
    else:
        print("Optimizer: Nadam")
        trainer = mx.gluon.Trainer(model.collect_params(), "Nadam",
                                   {"learning_rate": learning_rate})
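    # Also restore the optimizer state when resuming from a checkpoint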
    if os.path.isfile("model/global-wheat-yolo3-darknet53.state"):
        trainer.load_states("model/global-wheat-yolo3-darknet53.state")

    print("Training...", flush=True)
    for epoch in range(start_epoch, max_epochs):
        ts = time.time()

        random.shuffle(training_set)
        training_total_L = 0.0
        training_batches = 0
        for x, objectness, center_targets, scale_targets, weights, class_targets, gt_bboxes in get_batches(
                training_set,
                batch_size,
                width=img_w,
                height=img_h,
                net=model,
                ctx=context):
            training_batches += 1
            with mx.autograd.record():
                obj_loss, center_loss, scale_loss, cls_loss = model(
                    x, gt_bboxes, objectness, center_targets, scale_targets,
                    weights, class_targets)
                L = obj_loss + center_loss + scale_loss + cls_loss
                L.backward()
            trainer.step(x.shape[0])
            training_batch_L = mx.nd.mean(L).asscalar()
            # A NaN loss compares unequal to itself: abort if training diverges
            if training_batch_L != training_batch_L:
                raise ValueError("training loss is NaN")
            training_total_L += training_batch_L
            print(
                "[Epoch %d  Batch %d]  batch_loss %.10f  average_loss %.10f  elapsed %.2fs"
                % (epoch, training_batches, training_batch_L,
                   training_total_L / training_batches, time.time() - ts),
                flush=True)
        training_avg_L = training_total_L / training_batches

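        # Validation pass: compute mAP at each IoU threshold and average the results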
        for metric in metrics:
            metric.reset()
        for x, label in get_batches(validation_set,
                                    batch_size,
                                    width=img_w,
                                    height=img_h,
                                    ctx=context):
            classes, scores, bboxes = model(x)
            for metric in metrics:
                metric.update(bboxes, classes.reshape((0, -1)),
                              scores.reshape((0, -1)), label[:, :, :4],
                              label[:, :, 4:5].reshape((0, -1)))
        score = mx.nd.array([metric.get()[1] for metric in metrics],
                            ctx=context).mean()

        print(
            "[Epoch %d]  training_loss %.10f  validation_score %.10f  best_score %.10f  duration %.2fs"
            % (epoch + 1, training_avg_L, score.asscalar(), best_score,
               time.time() - ts),
            flush=True)

        if score.asscalar() > best_score:
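            # New best validation score: keep a separate copy of these weights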
            best_score = score.asscalar()
            model.save_parameters(
                "model/global-wheat-yolo3-darknet53_best.params")

        model.save_parameters("model/global-wheat-yolo3-darknet53.params")
        trainer.save_states("model/global-wheat-yolo3-darknet53.state")
Exemplo n.º 38
0
FORMAT = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
logging.basicConfig(filename='proverbaro.log', format=FORMAT)
console = logging.StreamHandler()
formatter = logging.Formatter('%(asctime)s - %(name)s: %(message)s')
console.setFormatter(formatter)
logging.getLogger('').addHandler(console)

logger = logging.getLogger(__name__)

app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///proverbaro.db'
db = SQLAlchemy(app)


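# Initialise the data model against Flask-SQLAlchemy's declarative base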
model = init_model(db.Model)


@app.route('/<date>/<int:publish_id>')
def show_proverb(date, publish_id):
    proverb = model.proverb_to_show(date, publish_id)
    definitions = []
    if proverb is not None:
        definitions = [
            find_definition(split_proverb_into_words(proverb.text)[0]),
            find_definition(split_proverb_into_words(proverb.text)[2])
        ]
    print(definitions)
    return render_template(
        'proverb.html',
        proverb=proverb.text if proverb is not None else None,
Exemplo n.º 40
0
import model
from authkit.users.sqlalchemy_driver import UsersFromDatabase

# Setup SQLAlchemy database engine
from sqlalchemy import engine_from_config

engine = engine_from_config({'sqlalchemy.url': 'sqlite:///test.db'},
                            'sqlalchemy.')
model.init_model(engine)
model.engine = engine

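# Manage users through the SQLAlchemy model, then create the tables and seed example data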
users = UsersFromDatabase(model)
model.meta.metadata.create_all(model.engine)
users.group_create("pylons")
users.role_create("admin")
users.user_create("james", password="******", group="pylons")
users.user_create("ben", password="******")
users.user_add_role("ben", role="admin")

# Commit the changes
model.meta.Session.commit()
model.meta.Session.remove()
Exemplo n.º 41
0
            np.hstack([bboxes1, np.full((bboxes1.shape[0], 1), r)]),
            np.hstack([bboxes2,
                       np.full((bboxes2.shape[0], 1), 1.0 - r)])
        ])
        return mix_raw.astype("uint8"), mix_bboxes


def reconstruct_color(img):
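    """Undo the ImageNet mean/std normalisation and return a displayable uint8 image."""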
    mean = mx.nd.array([0.485, 0.456, 0.406])
    std = mx.nd.array([0.229, 0.224, 0.225])
    return ((img * std + mean).clip(0.0, 1.0) * 255).astype("uint8")


if __name__ == "__main__":
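    # Quick sanity check: preview the dataset, training/validation batches and a few annotated samples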
    from model import init_model
    net = init_model()
    data = load_dataset("data")
    print("dataset preview: ", data[:3])
    print("max count of bboxes: ", max([len(bboxes) for _, _, bboxes in data]))
    print("training batch preview: ", next(get_batches(data, 4, net=net)))
    print("validation batch preview: ", next(get_batches(data, 4)))
    import matplotlib.pyplot as plt
    print("data visual preview: ")
    sampler = Sampler(data, 512, 512, net)
    for i, x in enumerate(data):
        print(x[1])
        y = sampler(i)
        gcv.utils.viz.plot_bbox(reconstruct_color(y[0].transpose((1, 2, 0))),
                                y[6])
        plt.show()