Code Example #1
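# Note: this excerpt assumes that sys, Options, and usage (whose UsageError
# is caught below, presumably twisted.python.usage) are imported or defined
# elsewhere in the source file.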
def run():

    try:
        options = Options()
        options.parseOptions()
    except usage.UsageError as errortext:
        print('{}: {}'.format(sys.argv[0], errortext))
        sys.exit(1)

    if options.subCommand == 'application':
        handle_application_command(options)

    if options.subCommand == 'start':
        handle_start_command(options)

    if options.subCommand == 'stop':
        handle_stop_command()

    if options.subCommand == 'restart':
        handle_restart_command()

    if options.subCommand == 'sql':
        Sql(options.subOptions)

    if options.subCommand == 'controller':
        Controller(options)

    if options.subCommand == 'model':
        Model(options)

    if options.subCommand == 'view':
        View(options)

    if options.subCommand == 'package':
        Package(options.subOptions)
Code Example #2
File: train.py  Project: ShuangPA/edit_bert
    def __init__(self, bert_config_file, is_training, num_labels, train_file,
                 dev_file, vocab_file, output_dir, max_seq_length,
                 learning_rate, batch_size, epochs, warmup_proportion,
                 virtual_batch_size_ratio, evaluate_every, init_ckpt):
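        # Create the output directory via the shell; the exit status is
        # ignored (os.makedirs(output_dir, exist_ok=True) would be a more
        # portable alternative).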
        os.system(f"mkdir {output_dir}")
        self._data_train = Dataset(train_file, num_labels, vocab_file, True,
                                   output_dir, True, max_seq_length)
        self._dev_data = Dataset(dev_file, num_labels, vocab_file, True,
                                 output_dir, False, max_seq_length)
        num_train_step = int(self._data_train.size / batch_size * epochs)
        num_warmup_step = int(num_train_step * warmup_proportion)

        self._model = Model(bert_config_file, max_seq_length, init_ckpt,
                            is_training, num_labels)

        self._train_op, self._global_step = optimization.create_optimizer(
            self._model.loss, learning_rate, num_train_step, num_warmup_step,
            False, virtual_batch_size_ratio)

        self.batch_size = batch_size
        self.epochs = epochs
        self.evaluate_every = evaluate_every
        self.output_dir = output_dir
        self._predictor = Predictor(bert_config_file, max_seq_length,
                                    num_labels)
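
A minimal construction sketch for the class above (the class name Trainer and
every value below are placeholder assumptions; only the argument names come
from the signature shown):

trainer = Trainer(
    bert_config_file='bert_config.json',   # placeholder path
    is_training=True,
    num_labels=2,                          # placeholder
    train_file='train.tsv',                # placeholder path
    dev_file='dev.tsv',                    # placeholder path
    vocab_file='vocab.txt',                # placeholder path
    output_dir='output',                   # placeholder path
    max_seq_length=128,
    learning_rate=5e-5,
    batch_size=32,
    epochs=3,
    warmup_proportion=0.1,
    virtual_batch_size_ratio=1,
    evaluate_every=100,
    init_ckpt='bert_model.ckpt',           # placeholder path
)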
Code Example #3
File: eval.py  Project: robert1003/ADL-hw1
test_dataset = TensorDataset(torch.from_numpy(test_X))

test_loader = DataLoader(test_dataset, batch_size=BATCH_SIZE, shuffle=False)

# build model
from _model import Model
from torch import optim
import torch.nn as nn

teacher_forcing_ratio = 0.5
GRAD_MAX = 1

model = Model(embedding=embedding,
              input_size=embedding.shape[0],
              hidden_size=embedding.shape[1],
              output_size=1,
              amp=1,
              n_layers=2,
              direction=2,
              dropout=0.0).to(device)

# load model
print('loading pretrained model...')
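# Note: map_location=device may be needed below if the checkpoint was saved
# on a different device (e.g. trained on GPU, loaded on CPU).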
checkpoint = torch.load(pretrained_ckpt)
model.load_state_dict(checkpoint['model_state_dict'])
print('done')

# define predict
import numpy as np


def predict(input_tensor):
Code Example #4
args = {
    'img_indices': [373, 413, 428, 468],
    'cnnid': 26,
    'iterations': 100,
    'lr': 0.01,
    'octave_scale': 1.2,
    'num_octaves': 10,
    'device': 'cuda'
}
args = argparse.Namespace(**args)

# build model
model = Model(
    make_layers([
        32, 32, 32, 'M',
        64, 64, 64, 'M',
        128, 128, 128, 'M',
        256, 256, 256, 256, 'M',
        512, 512, 512, 512, 'M'
    ])
).to(args.device)

# load checkpoint
checkpoint = torch.load(args.ckptpath)
model.load_state_dict(checkpoint['model_state_dict'])

# prepare dataset
valid_paths, valid_labels = get_paths_labels(os.path.join(args.dataset_dir, 'validation'))
valid_set = ImgDataset(valid_paths, valid_labels, 512, data_transforms['test'])

# dream & deep_dream
layer_activations = None
Code Example #5
File: train.py  Project: robert1003/ADL-hw1
train_loader = DataLoader(train_dataset, batch_size=BATCH_SIZE, shuffle=True)
valid_loader = DataLoader(valid_dataset, batch_size=BATCH_SIZE, shuffle=False)
test_loader = DataLoader(test_dataset, batch_size=BATCH_SIZE, shuffle=False)

# build model
from _model import Model
from torch import optim
import torch.nn as nn

teacher_forcing_ratio = 0.5
GRAD_MAX = 1

model = Model(embedding=embedding,
              input_size=embedding.shape[0],
              hidden_size=embedding.shape[1],
              output_size=1,
              amp=1,
              n_layers=2,
              direction=2,
              dropout=0.0).to(device)

optimizer = optim.Adadelta(model.parameters())
weight = torch.ones(1).to(device)
weight[0] = train_Y.shape[1]
criterion = nn.BCEWithLogitsLoss(pos_weight=weight)


# define train, evaluate, predict
# train
def train(input_tensor, target_tensor):
    model.train()
Code Example #6
    try:
        options = Options()
        options.parseOptions()
    except usage.UsageError as errortext:
        print('{}: {}'.format(sys.argv[0], errortext))
        sys.exit(1)

    if options.subCommand == 'application':
        handle_application_command(options)

    if options.subCommand == 'start':
        handle_start_command(options)

    if options.subCommand == 'stop':
        handle_stop_command()

    if options.subCommand == 'sql':
        Sql(options.subOptions)

    if options.subCommand == 'controller':
        Controller(options)

    if options.subCommand == 'model':
        Model(options)

    if options.subCommand == 'view':
        View(options)


if __name__ == '__main__':
    run()
Code Example #7
File: predict.py  Project: ShuangPA/edit_bert
    def __init__(self, bert_config_file, max_seq_length, num_labels):
        self._graph = tf.Graph()
        with self._graph.as_default():
            self._model = Model(bert_config_file, max_seq_length, None, False,
                                num_labels)
        self._sess = tf.Session(graph=self._graph)