Example #1
def main(opts):
    model = Model(66, opts.size)
    model.model.summary()
    model.load(opts.weights)

    train_list, val_list = split(opts.data)
    val_dataset = AFLW2000(val_list, batch_size=1, input_size=opts.size)

    err, times = [], []
    for idx, (x, y) in enumerate(val_dataset.data_generator()):
        print(f'{idx}/{val_dataset.epoch_steps}')

        t1 = time()
        res = model.test_online(x)
        times.append(time() - t1)
        ypr = np.array(y)[:, 0, 1]
        err.append(abs(ypr - res))

        print(f'YPR: {np.mean(np.array(err), axis=0)}')
        print(f'TIME: {np.mean(times)}')
        if idx == val_dataset.epoch_steps:
            break
Example #2
File: net_train.py  Project: ht014/snedq
    def __init__(self,
                 data_processor,
                 bottleneck_dim=128,
                 num_codebooks=16,
                 hidden_dim=512,
                 decoder_layers=2,
                 encoder_layers=2,
                 **kwargs):
        super().__init__()
        self.data_processor = data_processor
        self.encoder1 = nn.Sequential(
            Feedforward(self.data_processor.input_dim,
                        hidden_dim,
                        num_layers=encoder_layers,
                        **kwargs), nn.Linear(hidden_dim, bottleneck_dim))

        self.quntizer = Model(input_dim=bottleneck_dim,
                              hidden_dim=1024,
                              bottleneck_dim=256,
                              encoder_layers=2,
                              decoder_layers=2,
                              Activation=nn.ReLU,
                              num_codebooks=8,
                              codebook_size=256,
                              initial_entropy=3.0,
                              share_codewords=True).cuda()
        self.distance = DISTANCES['euclidian_squared']
        self.triplet_delta = 5
        all_parameters = list(self.encoder1.parameters()) + list(
            self.quntizer.parameters())
        self.optimizer = OneCycleSchedule(QHAdam(all_parameters,
                                                 nus=(0.8, 0.7),
                                                 betas=(0.95, 0.998)),
                                          learning_rate_base=1e-3,
                                          warmup_steps=10000,
                                          decay_rate=0.2)
        self.experiment_path = 'logs'

        self.writer = SummaryWriter(self.experiment_path, comment='Cora')
Example #3
File: cron.py  Project: johmats/ergasia
def cronjob(sched_id, cmd_id):
    db_model = Model()

    devices = db_model.get_schedule_active_devices(sched_id, 'iled', 'iled')
    if not devices:
        return

    with open(cache_json_path, 'r') as fin:
        json = json_load(fin)
        cmd_name = json['command_names'][str(cmd_id)]
        
    errors = []
    succeeded = []
    dev_ids_to_update = []
    for dev_id, dev_name, password in devices:
        # call worker.py
        res = handle_request(
            vendor='iled', model='iled', dev_name=dev_name,
            password=password, command=cmd_id
        )

        if res['status'] != 'OK':
            errors.append({
                'dev_name': dev_name,
                'status': res['status'],
                'reason': res['message']
            })
        else:
            dev_ids_to_update.append(dev_id)
            succeeded.append(dev_name)
    
    if errors:
        json = json_dumps({
            'schedule_id': sched_id,
            'vendor': 'iled',
            'model': 'iled',
            'command': cmd_name,
            'errors': errors
        })
        db_model.insert_log('schedule_error', json, 1)
    
    if dev_ids_to_update:
        db_model.update_device_status(dev_ids_to_update, cmd_id=cmd_id, dstat_name='active')

    db_model.cleanup()
Example #4
File: cron.py  Project: johmats/ergasia
def _cronjob(sched_id):
    db_model = Model()

    devices = db_model.get_schedule_active_devices(sched_id, 'nasys', 'ul2011')

    sched_cmd_data = generate_schedule(sched_id)
    cmd = 'send_custom_command'
    msg = json_dumps(sched_cmd_data)

    errors = []
    succeeded = []
    for _dev_id, dev_name, password in devices:
        res = handle_request(vendor='nasys',
                             model='ul2011',
                             dev_name=dev_name,
                             password=password,
                             command=cmd,
                             parameter=msg)

        if res['status'] != 'OK':
            errors.append({
                'dev_name': dev_name,
                'status': res['status'],
                'reason': res['message']
            })
        else:
            succeeded.append(dev_name)

    if errors:
        json = json_dumps({
            'schedule_id': sched_id,
            'vendor': 'nasys',
            'model': 'ul2011',
            'errors': errors
        })
        db_model.insert_log('schedule_error', json, 1)

    db_model.cleanup()
Example #5
def update_cron():
    times = get_sunrise_sunset_times()

    db_model = Model()
    schedule_items = db_model.get_all_schedule_items(times)
    schedule_models = db_model.get_all_schedule_models()
    db_model.cleanup()

    # print(schedule_items)
    # print(schedule_models)

    with open(cron_txt_path, 'w') as fout:
        write_crons_header(fout)

        for sched_id in schedule_items:
            items = schedule_items[sched_id]
            models = schedule_models[sched_id]

            # FIXME: these lines probably belong in a separate function for each model in drivers/model/funcs.py

            # NASys controllers
            if ('nasys', 'ul2011') in models:
                fout.write(
                    f'{crontimes["nasys_update_schedule"]} /usr/bin/python3 {basepath}/drivers/nasys/ul2011/cron.py {sched_id}\n'
                )

            # iLED controllers
            if ('iled', 'iled') in models:
                for time, cmd_id, _cmd_name in items:
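                    # `time` is presumably an 'HH:MM' string; the cron fields written
                    # below are: minute hour day-of-month month day-of-week, then the command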
                    fout.write(
                        f'{time[3:5]} {time[0:2]} * * * /usr/bin/python3 {basepath}/drivers/iled/iled/cron.py {sched_id} {cmd_id}\n'
                    )
        fout.flush()

    username = getenv('USER')
    run([f'crontab -u {username} {cron_txt_path} <<EOL'], shell=True)
Example #6
    def setUp(self):
        self.exec_key = registry.create_exec_env(save_on_register=True)
        self.already_cleared = False
        self.test_agent = Agent(TEST_AGENT_NM, exec_key=self.exec_key,
                                action=self.agent_action)
        self.model = Model(exec_key=self.exec_key)
Example #7
    def test_parse_should_call_given_parser_strategy(self):
        path = 'dummy/path'
        model = Model([], [])
        self._writer.write(path, model)

        self._writing_strategy_mock.write.assert_called_once_with(path, model)
Example #8
def die(msg):

    print(msg)
    exit(-1)


####################################################################################################

# main
if __name__ == '__main__':
    # Ensure the script is not already running
    res = try_lock_pidfile(pidfile)
    if res['status'] != 'OK':
        die(json_dumps(res))

    db_model = Model()

    # for each command
    for log in db_model.get_logs_with_type('command'):
        json = json_loads(log['log_message'])
        cmd_id = json['cmd_id']

        succeeded, errors = [], []

        try:
            # get command name from id
            try:
                with open(cache_json_path, 'r') as fin:
                    all_commands = json_load(fin)['command_names']
                    cmd_name = all_commands[str(cmd_id)]
            except:
Example #9
# -*- coding: utf-8 -*-
import os

from sklearn.preprocessing import RobustScaler
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.svm import SVR
from sklearn.pipeline import Pipeline

from lib.training import routine
from lib.model import Model

if __name__ == '__main__':
    estimators = [
        ExtraTreesRegressor(n_estimators=15,
                            criterion='mae',
                            min_samples_split=.01,
                            bootstrap=True,
                            n_jobs=-1,
                            random_state=0),
        Pipeline([('scaler', RobustScaler()),
                  ('svr', SVR(C=20, gamma=.3, epsilon=.1))])
    ]

    model = Model(estimators, [.45, .55])

    routine(dir_path=os.path.dirname(__file__),
            task_id=1,
            model=model,
            mode=os.environ.get('KDD_MODE', 'train'))
    print('task1 train done')
Example #10
def test_intermediate_model():
    efficientnet_model = Model()
    layer_details = efficientnet_model.get_layers()
    layer_name = layer_details[len(layer_details) - 2]["name"]
    intermediate_model = efficientnet_model.get_intermediate_model(layer_name)
    assert (intermediate_model.__class__.__name__ == "Functional")
Example #11
    def run_training(train,
                     valid,
                     feature_cols,
                     target_cols,
                     model_path,
                     seed,
                     param_provided=None):
        seed_everything(seed)

        x_train, y_train = train[feature_cols].values, train[
            target_cols].values
        # create the dataset loader
        train_dataset = MoADataset(x_train, y_train)
        trainloader = torch.utils.data.DataLoader(train_dataset,
                                                  batch_size=BATCH_SIZE,
                                                  shuffle=True)

        if valid is not None:
            x_valid, y_valid = valid[feature_cols].values, valid[
                target_cols].values
            valid_dataset = MoADataset(x_valid, y_valid)
            validloader = torch.utils.data.DataLoader(valid_dataset,
                                                      batch_size=BATCH_SIZE,
                                                      shuffle=False)

        # override the default hyperparameters if explicit values are provided
        if param_provided is not None:
            EPOCHS = param_provided['epoch']
            hidden_size = param_provided['hidden_size']
            LEARNING_RATE = param_provided['lr']

        print("hidden_size: ", hidden_size, ", learning_rate: ", LEARNING_RATE)

        # create a model instance
        model = Model(
            num_features=num_features,
            num_targets=num_targets,
            hidden_size=hidden_size,
        )

        model.to(DEVICE)

        optimizer = torch.optim.Adam(model.parameters(),
                                     lr=LEARNING_RATE,
                                     weight_decay=WEIGHT_DECAY)
        # scheduler = optim.lr_scheduler.OneCycleLR(optimizer=optimizer, pct_start=0.05, div_factor=1.5e3,
        #                                           max_lr=1e-2, epochs=EPOCHS, steps_per_epoch=len(trainloader))

        # lmbda = lambda epoch: 0.5
        # scheduler = torch.optim.lr_scheduler.MultiplicativeLR(optimizer, lr_lambda=lmbda)
        scheduler = torch.optim.lr_scheduler.StepLR(optimizer,
                                                    step_size=4,
                                                    gamma=0.5)

        loss_fn = nn.BCEWithLogitsLoss()

        early_stopping_steps = EARLY_STOPPING_STEPS
        early_step = 0

        best_loss = np.inf

        #
        train_losses = []
        valid_losses = []

        for epoch in range(EPOCHS):

            print('Epoch {}, lr {}'.format(epoch,
                                           optimizer.param_groups[0]['lr']))

            train_loss = train_fn(model, optimizer, scheduler, loss_fn,
                                  trainloader, DEVICE)
            train_losses.append(train_loss)

            # print(f"FOLD: {fold}, EPOCH: {epoch}, train_loss: {train_loss}")
            if valid is not None:  # only run the valid if valid set is provided
                valid_loss, valid_preds = valid_fn(model, loss_fn, validloader,
                                                   DEVICE)
                valid_losses.append(valid_loss)

                if epoch % 5 == 0:
                    print(
                        f"EPOCH: {epoch}, train_loss: {train_loss}, valid_loss: {valid_loss}"
                    )

                if valid_loss < best_loss:

                    best_loss = valid_loss
                    # oof[val_idx] = valid_preds
                    torch.save(model.state_dict(), model_path)

                elif EARLY_STOP == True:
                    early_step += 1
                    if early_step >= early_stopping_steps:
                        break
            else:
                if epoch % 10 == 0:
                    print(f"EPOCH: {epoch}, train_loss: {train_loss}")

        print("early stop with epoch: ", epoch)
        print(f"LAST EPOCH: {epoch}, train_loss: {train_loss}")

        if valid is None:  # when there is no valid set, save the model
            torch.save(model.state_dict(), model_path)

        return {"train_losses": train_losses, "valid_losses": valid_losses}
Example #12
    def load(self, path):
        data = torch.load(path)

        return Model(conf=data['conf'],
                     idx_to_class=data['idx_to_class'],
                     state=data['state'])
Example #13
app = Flask(__name__, static_url_path='')

from threading import Thread, current_thread
logger.debug('Child process: %s, parent process: %s, thread: %r', os.getpid(),
             os.getppid(), current_thread())

config = Config('../config.json')

try:
    confs = config.all()
except Exception as e:
    logger.error("配置文件解析出错:%r", e)
    exit()

model = Model("../model/")

try:
    model.load_all(confs)
except Exception as e:
    logger.error("加载Model出错:%r", e)
    exit()

# Note: this must be placed after the model has been loaded
graph = tf.get_default_graph()
logger.debug("Got the global default Graph: %r", graph)


# The incoming buffer is raw byte data
def process(buffer, bank):
    logger.debug("Length of data read from the web: %r", len(buffer))
Example #14
def _handle_request(**kwargs):
    # Load json
    with open(cache_json_path, 'r') as fin:
        cache_json = json_load(fin)

    try:
        cmd_id = str(kwargs['command'])
    except KeyError as e:
        return {'status': 'NOTOK', 'message': f'Missing parameters: {e}'}

    # If cmd_id is not a known id, assume the command name has been given
    try:
        cmd_name = cache_json['command_names'][cmd_id]
    except KeyError:
        cmd_name = cmd_id

    #################################################################
    # Set dim level command. All current models support it.
    #################################################################
    if cmd_name.startswith('set_dim_level'):
        try:
            vendor = kwargs['vendor']
            model = kwargs['model']
            dev_name = kwargs['dev_name']
            password = kwargs['password']
        except KeyError as e:
            return {'status': 'NOTOK', 'message': f'Missing parameters: {e}'}

        model_info = cache_json[vendor][model]
        cmd_data = model_info['commands'][cmd_name]
        cnx = model_info['cnx']

        return send_commands(dev_name, cmd_data, cnx, password)

    #################################################################
    # Update the cron file.
    #################################################################
    elif cmd_name == 'update_cron':
        # run updater
        update_cron()

        return {'status': 'OK', 'message': 'Success'}

    #################################################################
    # Update the schedule.
    #################################################################
    elif cmd_name == 'update_cache':
        # run updater
        update_cache()

        return {'status': 'OK', 'message': 'Success'}

    #################################################################
    # Start notification daemon for a certain model
    #################################################################
    elif cmd_name == 'start_notifications':
        try:
            vendor = kwargs['vendor']
            model = kwargs['model']
        except KeyError as e:
            return {'status': 'NOTOK', 'message': f'Missing parameters: {e}'}

        pidfile = f'{basepath}/pids/__{vendor}__{model}__notifier__.pid'
        daemon = f'{basepath}/scripts/notification_daemon.py'

        if is_locked_pidfile(pidfile):
            return {'status': 'OK', 'message': 'Daemon is running'}

        Popen(['/usr/bin/python3', daemon, '-v', vendor, '-m', model])

        return {'status': 'OK', 'message': 'Notification daemon started'}

    #################################################################
    # Stop notification daemon for a certain model
    #################################################################
    elif cmd_name == 'stop_notifications':
        try:
            vendor = kwargs['vendor']
            model = kwargs['model']
        except KeyError as e:
            return {'status': 'NOTOK', 'message': f'Missing parameters: {e}'}

        pidfile = f'{basepath}/pids/__{vendor}__{model}__notifier__.pid'

        try:
            with open(pidfile, 'r') as fin:
                pid = int(fin.readline())

            # ask daemon to terminate
            kill(pid, SIGTERM)
            return {'status': 'OK', 'message': 'Notification daemon stopped'}

        except FileNotFoundError as e:
            return {
                'status': 'OK',
                'message': 'Notification daemon is not running'
            }
        except OSError as e:
            if e.errno == ESRCH:
                return {
                    'status': 'OK',
                    'message': 'Notification daemon is not running'
                }
            else:
                return {
                    'status': 'NOTOK',
                    'message': 'Stopping the daemon failed'
                }
        except Exception as e:
            return {
                'status': 'OK',
                'message': f'An unknown error occurred: {e}'
            }

    #################################################################
    # Custom command to update a device schedule. (only for devices
    # that support it, e.g. Nasys models)
    #################################################################
    elif cmd_name.startswith('set_device_schedule'):
        try:
            vendor = kwargs['vendor']
            model = kwargs['model']
            dev_name = kwargs['dev_name']
            password = kwargs['password']
            sched_id = int(kwargs['parameter'])
        except KeyError as e:
            return {'status': 'NOTOK', 'message': f'Missing parameters: {e}'}
        except ValueError as e:
            return {'status': 'NOTOK', 'message': f'Invalid parameter: {e}'}

        model_info = cache_json[vendor][model]
        cnx = model_info['cnx']

        # FIXME: pass the schedule as parameter, not the sched_id only, too slow
        db_model = Model()
        schedule = db_model.get_schedule_items(sched_id)
        db_model.cleanup()

        try:
            from importlib import import_module
            m = import_module(f'drivers.{vendor}.{model}.funcs')
            commands = m.generate_schedule_commands(schedule)
        except (ImportError, AttributeError):
            return {
                'status': 'OK',
                'message':
                f'{vendor}/{model} models do not store their schedule'
            }

        return send_commands(dev_name, commands, cnx, password)

    #################################################################
    # Custom command to clear the schedule of a device (that supports it)
    #################################################################
    elif cmd_name.startswith('clear_device_schedule'):
        try:
            vendor = kwargs['vendor']
            model = kwargs['model']
            dev_name = kwargs['dev_name']
            password = kwargs['password']
        except KeyError as e:
            return {'status': 'NOTOK', 'message': f'Missing parameters: {e}'}

        model_info = cache_json[vendor][model]
        cnx = model_info['cnx']

        try:
            cmd_data = model_info['commands']['clear_device_schedule']
        except KeyError:
            return {
                'status': 'OK',
                'message':
                f'{vendor}/{model} models do not store their schedule'
            }

        return send_commands(dev_name, cmd_data, cnx, password)

    #################################################################
    # Send a custom message to a device
    #################################################################
    elif cmd_name.startswith('send_custom_command'):
        try:
            vendor = kwargs['vendor']
            model = kwargs['model']
            dev_name = kwargs['dev_name']
            password = kwargs['password']
            parameter = kwargs['parameter']
        except KeyError as e:
            return {'status': 'NOTOK', 'message': f'Missing parameters: {e}'}

        model_info = cache_json[vendor][model]
        cnx = model_info['cnx']

        try:
            custom_cmd_data = json_loads(parameter)
        except Exception as e:
            return {'status': 'NOTOK', 'message': f'Invalid message: {e}'}

        return send_commands(dev_name, custom_cmd_data, cnx, password)

    #################################################################
    # Unknown command, error out.
    #################################################################
    else:
        # Unknown command
        return {
            'status': 'NOTOK',
            'message': f'Command {cmd_name} is not implemented'
        }
Example #15
                                             batch_size=BATCH_SIZE,
                                             shuffle=False)

    for k_fold in np.arange(NFOLDS):
        # for k_fold in [0]:

        if best_param_with_epoch is not None:
            hidden_size = best_param_with_epoch["kfold_" +
                                                str(k_fold)]['hidden_size']

        model_path = models_dir + prefix + "_model_fold_{0:d}.pth".format(
            k_fold)

        model = Model(
            num_features=num_features,
            num_targets=num_targets,
            hidden_size=hidden_size,
        )
        model.load_state_dict(torch.load(model_path))
        model.to(DEVICE)

        predictions = inference_fn(model, testloader, DEVICE)
        # save the prediction on test set
        pred_test = pd.DataFrame(data=predictions,
                                 columns=target_cols).fillna(0)
        pred_test = pd.concat([data_test_x[["sig_id"]], pred_test], axis=1)
        pred_result_f = models_dir + prefix + "_fold_{0:d}_prediction.csv".format(
            k_fold)
        pred_test.to_csv(pred_result_f, index=False)

    # merge all the results by average
Example #16
    def run_training_tune(trial, train, valid, feature_cols, target_cols,
                          model_path, seed):
        seed_everything(seed)

        x_train, y_train = train[feature_cols].values, train[
            target_cols].values
        x_valid, y_valid = valid[feature_cols].values, valid[
            target_cols].values

        # create the dataset loader
        train_dataset = MoADataset(x_train, y_train)
        valid_dataset = MoADataset(x_valid, y_valid)
        trainloader = torch.utils.data.DataLoader(train_dataset,
                                                  batch_size=BATCH_SIZE,
                                                  shuffle=True)
        validloader = torch.utils.data.DataLoader(valid_dataset,
                                                  batch_size=BATCH_SIZE,
                                                  shuffle=False)

        hidden_size = trial.suggest_int("hidden_size", 30, 60, step=10)
        # create a model instance
        model = Model(
            num_features=num_features,
            num_targets=num_targets,
            hidden_size=hidden_size,
        )

        model.to(DEVICE)

        lr = trial.suggest_float("lr", 1e-5, 1e-1, log=True)
        # lr = 0.0084

        optimizer = torch.optim.Adam(model.parameters(),
                                     lr=lr,
                                     weight_decay=WEIGHT_DECAY)
        # this is used to adjust the learning rate while training the model
        # scheduler = optim.lr_scheduler.OneCycleLR(optimizer=optimizer, pct_start=0.05,
        #                                          max_lr=1e-2, epochs=EPOCHS, steps_per_epoch=len(trainloader))
        scheduler = torch.optim.lr_scheduler.StepLR(optimizer,
                                                    step_size=4,
                                                    gamma=0.5)

        loss_fn = nn.BCEWithLogitsLoss()

        early_stopping_steps = EARLY_STOPPING_STEPS
        early_step = 0

        best_loss = np.inf

        #
        train_losses = []
        valid_losses = []

        for epoch in range(EPOCHS):

            train_loss = train_fn(model, optimizer, scheduler, loss_fn,
                                  trainloader, DEVICE)
            train_losses.append(train_loss)
            # print(f"FOLD: {fold}, EPOCH: {epoch}, train_loss: {train_loss}")

            valid_loss, valid_preds = valid_fn(model, loss_fn, validloader,
                                               DEVICE)
            valid_losses.append(valid_loss)

            if epoch % 5 == 0:
                print(
                    f"EPOCH: {epoch}, train_loss: {train_loss}, valid_loss: {valid_loss}"
                )

            if valid_loss < best_loss:

                best_loss = valid_loss
                # now don't save the model to adapt to the use of optuna
                # torch.save(model.state_dict(), model_path)

            elif EARLY_STOP == True:
                early_step += 1
                if early_step >= early_stopping_steps:
                    break

        print("early stopping with epoch: ", epoch)

        if trial.should_prune():
            raise optuna.exceptions.TrialPruned()

        return valid_losses[-1]
Example #17
    def test_get_env(self):
        """
        See if we get an env we have registered back as the env.
        """
        self.model = Model(exec_key=self.exec_key)
        self.assertEqual(self.model.env, get_env(exec_key=self.exec_key))
Example #18
for i, (input_idx,
        target_idx) in enumerate(zip(input_example[:5], target_example[:5])):
    print("Step {:4d}".format(i))
    print("  input: {} ({:s})".format(input_idx, repr(idx2char[input_idx])))
    print("  expected output: {} ({:s})".format(target_idx,
                                                repr(idx2char[target_idx])))

# Buffer size to shuffle the dataset
# (TF data is designed to work with possibly infinite sequences,
# so it doesn't attempt to shuffle the entire sequence in memory. Instead,
# it maintains a buffer in which it shuffles elements).
BUFFER_SIZE = 10000

dataset = dataset.shuffle(BUFFER_SIZE).batch(BATCH_SIZE, drop_remainder=True)

model = Model(len(char2idx), EMBEDDING_DIM, UNITS)

# Using adam optimizer with default arguments
optimizer = tf.train.AdamOptimizer()

# Build the model with an explicit (batch, sequence-length) input shape so that
# model.summary() below can report output shapes and parameter counts.
model.build(tf.TensorShape([BATCH_SIZE, SEQ_LENGTH]))

model.summary()

# Training step
EPOCHS = 30

# Directory where the checkpoints will be saved
checkpoint_dir = os.getenv('TRAINING_CHECKPOINT_DIR',
                           './data/training_checkpoints')
# Name of the checkpoint files
Example #19
def main():
    """ The main routine. """

    # Fix random seeds for reproducibility - these are themselves generated from random.org
    # From https://keras.io/getting-started/faq/#how-can-i-obtain-reproducible-results-using-keras-during-development
    os.environ['PYTHONHASHSEED'] = '0'
    np.random.seed(91)
    rn.seed(95)
    session_conf = tf.ConfigProto(intra_op_parallelism_threads=1,
                                  inter_op_parallelism_threads=1)
    tf.set_random_seed(47)
    sess = tf.Session(graph=tf.get_default_graph(), config=session_conf)
    k.set_session(sess)

    # Enable simple logging
    logging.basicConfig(level=logging.INFO, format='%(message)s')

    # Parse command line arguments
    args = parseargs()

    # Create run folder
    output_directory = create_output_folder(args.output)

    # Write arguments to file
    with open(output_directory + 'arguments.txt', 'a') as arguments_file:
        for arg in vars(args):
            arguments_file.write(
                str(arg) + ': ' + str(getattr(args, arg)) + '\n')

    ##############
    # Prepare data
    print('')
    data = Data(incidences_file=args.incidences,
                specifications_file=args.specifications,
                plot_data=args.plotData,
                output_directory=output_directory)
    data.state(message='Raw data')

    data.filter_cases(cases_file=args.cases)
    data.state(message='Filtered SEER*Stat cases from ASCII')

    # Determine inputs, filter, and pre process them
    data.apply_data_pipeline(pipelines.data_pipeline_full, args.oneHotEncoding)
    data.state(
        message=
        'Remove irrelevant, combined, post-diagnosis, and treatment attributes'
    )

    data.create_target(args.task)
    data.state(message='Create target label indicating cancer survival for ' +
               args.task)

    encodings = data.finalize()
    data.state(message='Remove inputs with constant values')

    ###############
    # Prepare model
    model = Model(model_type=args.model,
                  task=args.task,
                  input_dim=(len(data.frame.columns) - 1),
                  encodings=encodings,
                  mlp_layers=args.mlpLayers,
                  mlp_width=args.mlpWidth,
                  mlp_dropout=args.mlpDropout,
                  mlp_emb_neurons=args.mlpEmbNeurons,
                  svm_gamma=args.svmGamma,
                  svm_c=args.svmC,
                  logr_c=args.logrC)

    if args.plotData:
        model.plot_model(output_directory)

    ################
    # Carry out task
    experiment = Experiment(model=model,
                            data=data,
                            task=args.task,
                            valid_ratio=0.1,
                            test_ratio=0.1,
                            model_type=args.model,
                            encodings=encodings,
                            encode_categorical_inputs=args.oneHotEncoding,
                            plot_results=args.plotResults,
                            output_directory=output_directory)

    experiment.train(mlp_epochs=args.mlpEpochs)

    results_validate = experiment.validate()
    # Write validation results to file
    with open(output_directory + 'results_validate.txt', 'a') as results_file:
        for res in results_validate:
            results_file.write(res + '\n')

    # Only test final model, do not use for tuning
    if args.test:
        results_test = experiment.test()
        # Write validation results to file
        with open(output_directory + 'results_test.txt', 'a') as results_file:
            for res in results_test:
                results_file.write(res + '\n')

    ###################
    # Input importance
    if args.importance:
        importance = experiment.importance(encodings=encodings)
        # Write importance results to file
        with open(output_directory + 'results_importance.txt',
                  'a') as results_file:
            for (column, rel) in importance:
                results_file.write(column + '=' + str(rel) + '\n')
Example #20
        Pipeline([
            ('scaler', RobustScaler()),
            ('knn', KNeighborsRegressor(5, weights='distance', n_jobs=-1))
        ]),
        Pipeline([
            ('scaler', RobustScaler()),
            ('svr', SVR(
                C=160,
                gamma=0.1,
                epsilon=0.1
            ))
        ]),
        Pipeline([
            ('scaler', RobustScaler()),
            ('lasso', LassoCV(
                cv=KFold(4, True, 0),
                n_jobs=-1
            ))
        ])
    ]

    model = Model(estimators, [.45, .05, .45, .05])

    routine(
        dir_path=os.path.dirname(__file__),
        task_id=2,
        model=model,
        mode=os.environ.get('KDD_MODE', 'train')
    )
    print('task2 train done')
Example #21
    def __init__(self, protector):
        self.protector = protector

        self.client = mqtt.Client()
        self.db_model = Model()
        self.initialize_notifier()
Example #22
    def __init__(self, version):
        self.view = {}
        self.model = Model(version)
        wx.App.__init__(self, False)
Example #23
from lib.optimizers.adam       import Adam
from lib.data.data             import Data
from lib.hyperparameter_search import HyperparameterSearch
from lib.model                 import Model

data = Data().load()

for hyperparameters in HyperparameterSearch().hyperparameters():
    Model(data,
          Adam,
          hyperparameters['learning_rate'],
          hyperparameters['regularization_strength'],
          hyperparameters['number_hidden_layers'],
          hyperparameters['min_number_hidden_nodes']).train()
Example #24
from data import load_data
from lib.model import Model
from util import Logger, train, validation, AdamOptimizer

# os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
# os.environ["CUDA_VISIBLE_DEVICES"]=str(exp_config['device'])
# torch.cuda.set_device(0)

# data
train_data, val_data = load_data(data_config, exp_config['batch_size'])
eval_length = data_config['eval_length']

# logger

# model
model = Model(**model_config).to(0)

# optimizer
optimizer = AdamOptimizer(params=model.parameters(), lr=exp_config['lr'],
                          grad_clip_value=exp_config['grad_clip_value'],
                          grad_clip_norm=exp_config['grad_clip_norm'])

logger_on = True

if logger_on:
    logger = Logger(exp_config, model_config, data_config)

# train / val loop
for epoch in range(exp_config['n_epochs']):

    print('Epoch:', epoch)
Example #25
test_dataset = SegDataset(ts.VALIDATION_LMDB)
test_align_collate = AlignCollate('test',
                                  ts.LABELS,
                                  ts.MEAN,
                                  ts.STD,
                                  ts.IMAGE_SIZE_HEIGHT,
                                  ts.IMAGE_SIZE_WIDTH,
                                  ts.ANNOTATION_SIZE_HEIGHT,
                                  ts.ANNOTATION_SIZE_WIDTH,
                                  ts.CROP_SCALE,
                                  ts.CROP_AR,
                                  random_cropping=ts.RANDOM_CROPPING,
                                  horizontal_flipping=ts.HORIZONTAL_FLIPPING)
assert test_dataset
test_loader = torch.utils.data.DataLoader(test_dataset,
                                          batch_size=opt.batchsize,
                                          shuffle=False,
                                          num_workers=opt.nworkers,
                                          pin_memory=pin_memory,
                                          collate_fn=test_align_collate)

# Define Model
# opt.batchsize
# opt.nepochs
model = Model(ts.LABELS, load_model_path=opt.model, usegpu=opt.usegpu)
model.fit(ts.CRITERION, ts.LEARNING_RATE, ts.WEIGHT_DECAY, ts.CLIP_GRAD_NORM,
          ts.LR_DROP_FACTOR, ts.LR_DROP_PATIENCE, ts.OPTIMIZE_BG, ts.OPTIMIZER,
          ts.TRAIN_CNN, opt.nepochs, ts.CLASS_WEIGHTS, train_loader,
          test_loader, model_save_path)
Example #26
from flask import Flask, send_from_directory, jsonify

from lib.model import Model
from lib.prezi_indexer import PerziIndexer

app = Flask(__name__, static_folder="prezi-app/dist", static_url_path="")
model = Model(
    PerziIndexer.index_json(open("data/prezis.json").read(), index_by='id'))


@app.route('/')
def index():
    return app.send_static_file('index.html')


@app.route('/<path:path>')
def static_proxy(path):
    return app.send_static_file(path)


@app.route('/prezi/<id>')
def get_prezi_data(id):
    return jsonify(model[id])


@app.route('/search/<phrase>')
def search_name(phrase):
    return jsonify(model.search(phrase, field_key="title"))


if __name__ == '__main__':
Example #27
from skimage import color

from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation, Flatten
from keras.layers.convolutional import Convolution2D, MaxPooling2D
from keras.regularizers import l2
from keras import backend as K
from keras.callbacks import TensorBoard  
from keras.callbacks import ModelCheckpoint

from lib import label,image_process,log,crash_debug
from lib.cnn import CNN
from lib.model import Model
from lib.config import Config

__model = Model()

# If batch_size is too small, training is slow and tends to overfit; if too large, it underfits. Choose a suitable value.
def train(data_dir,conf,epochs=2,batch_size=100):
    image_width = conf.width
    image_height = conf.height
    _letters = conf.charset  # character set
    _num_symbol = conf.number  # number of characters to recognize

    model = __model.load4train(conf)
    model_path = __model.model_path(conf)

    # training set
    x_train,y_train = image_process.load_all_image_by_dir(data_dir,conf)

    # Save checkpoints during training, in case the whole run crashes