Example #1
def predict(batch: Batch):
    # Remove empty texts
    batch.texts = list(filter(lambda x: len(x) > 0, batch.texts))
    model = ModelWrapper()
    res = model(batch)

    return {"documents": res}
Example #2
def prepare_model(learning_rate, momentum, checkpoint_file):
    """Prepare a ResNet-34 model with CrossEntropyLoss and SGD.

    Args:
        learning_rate (float): The learning rate for SGD.
        momentum (float): The momentum for SGD.
        checkpoint_file (str or None): If not `None`, the path of the
            checkpoint file to load.

    Returns:
        model.Model: The prepared model object.
    """
    # Load model.
    resnet = torchvision.models.resnet34()
    resnet.conv1 = torch.nn.Conv2d(1,
                                   64,
                                   kernel_size=3,
                                   stride=1,
                                   padding=1,
                                   bias=False)
    resnet.avgpool = torch.nn.AvgPool2d(2)

    # Prepare loss function and optimizer.
    loss_function = torch.nn.CrossEntropyLoss()
    optimizer = torch.optim.SGD(resnet.parameters(),
                                lr=learning_rate,
                                momentum=momentum)

    # Wrap model object and load checkpoint file if provided.
    model = ModelWrapper(resnet, loss_function, optimizer)
    if checkpoint_file:
        model.load(checkpoint_file)

    return model
Example #3
    def test_can_fit_model(self):
        """ This test check ability of fitting model in PER to random vector. """
        state_shape = (4, )
        action_space = 2

        model = PrioritizedExperienceReplayTests._create_model(
            state_shape, action_space)
        PER = PrioritizedExperienceReplay(maxlen=1,
                                          model=model,
                                          key_scaling=10,
                                          gamma=1)
        model_wrapper = ModelWrapper(
            model=model, optimizer=K.optimizers.Adam(learning_rate=0.01))
        model_wrapper.compile()

        sample = Sample(action=np.random.randint(0, action_space),
                        state=np.random.rand(state_shape[0]),
                        reward=10,
                        next_state=None)
        PER.add(samples=[sample])

        history_of_loss = []
        fit_vector = np.zeros((action_space, ))
        fit_vector[sample.action] = sample.reward
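        # Fit repeatedly on the single stored transition and record the PER loss after each step.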
        for _ in range(100):
            model_wrapper.fit(sample.state, fit_vector)
            history_of_loss.append(PER._loss_calculate(sample=sample))

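        # The recorded loss should decrease strictly with each iteration as the model converges on the target.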
        for idx, loss in enumerate(history_of_loss[:-1]):
            self.assertGreater(loss, history_of_loss[idx + 1])
Example #4
def train(args):
    loss_fn = nn.CrossEntropyLoss(ignore_index=0)
    adaptive = ModelWrapper(args, loss_fn, get_loader)
    optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad,
                                        adaptive.model.parameters()),
                                 lr=args.learning_rate,
                                 betas=(args.alpha, args.beta),
                                 weight_decay=args.l2_rate)
    if adaptive.train(optimizer, args):
        return 0
Example #5
def main(args):
    loss_fn = nn.CrossEntropyLoss(ignore_index=0)
    adaptive = ModelWrapper(args, loss_fn)

    with open(args.word2idx_path, 'r') as fr:
        word2idx = json.loads(fr.read())
    with open(args.sememe2idx_path, 'r') as fr:
        sememe2idx = json.loads(fr.read())
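    # Build a DataLoader over the generated-results file so it can be scored one item at a time.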
    results = ResDataset(args.gen_file_path, word2idx, sememe2idx)
    res_loader = data.DataLoader(dataset=results, batch_size=1, shuffle=False)

    scores = adaptive.score(res_loader)

    with codecs.open(args.output_path, 'w', 'utf-8') as fw:
        fw.write('\n'.join(scores))
    return 0
Example #6
    def __init__(self, environment: Env,
                 memory: AbstractMemory,
                 policy: AbstractPolicy,
                 model: K.Model,
                 optimizer: K.optimizers.Optimizer,
                 logger: Logger):
        self.environment = environment
        self.memory = memory
        self.policy = policy
        self.model = ModelWrapper(model, optimizer)
        self.logger = logger

        self.history = []

        logger.create_settings_model_file(model)
        logger.create_settings_agent_file(self)
Example #7
    def get(self, id):
        m = ModelWrapper("pickle_model.pkl")

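        # Look up the customer by cuid in the data frame loaded elsewhere in the module.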
        customer = data[data.cuid == id]

        if len(customer) == 0:
            print("Customer not found")
            return {"conv": 0, "revenue": 0}

        print("Customer found")
        res = m.predict(customer)

        print("res[0]", res[0], "res[1]", res[1])

        mes = metrics(customer)

        return {
            "conv": res[0][0],
            "revenue": str(res[1].values[0]),
            "message": mes
        }
Example #8
    def __init__(self):
        """ Khởi động ứng dụng """
        super(UiOutputDialog, self).__init__()
        self.model_wrapper = ModelWrapper(model_path)

        self.capture = None

        self.timer = QTimer(self)
        self.timer.timeout.connect(self.update_frame)

        loadUi("window.ui", self)

        self.holistic = mp.solutions.holistic.Holistic()
        self.face = mp.solutions.face_mesh.FaceMesh(max_num_faces=100)
        self.hand = mp.solutions.hands.Hands(max_num_hands=100)

        now = QDate.currentDate()
        current_date = now.toString('ddd dd MMMM yyyy')
        current_time = datetime.datetime.now().strftime("%I:%M %p")
        self.Date_Label.setText(current_date)
        self.Time_Label.setText(current_time)

        self.image = None

        self.settings = UISettings()
        self.Settings_Button.clicked.connect(
            lambda: self.handle_setting_button())

        self.warnings = []
        self.prev_waring_code = -1
        self.prev_waring_code_1 = -1
        self.prev_waring_code_2 = -1
        self.warning_history = UIWarnings()
        self.Warning_History.clicked.connect(
            lambda: self.handle_waring_history_button())

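        # Load the YOLOv3-tiny detector, its class names, and a random drawing color per class.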
        self.net = cv2.dnn.readNet("yolov3-tiny.weights", "yolov3-tiny.cfg")
        with open("yolov3.txt", 'r') as f:
            self.classes = [line.strip() for line in f.readlines()]
        self.colors = np.random.uniform(0, 255, size=(len(self.classes), 3))
Example #9
    def __init__(self,
                 environment: Env,
                 memory: AbstractMemory,
                 policy: AbstractPolicy,
                 model: K.Model,
                 logger: Logger,
                 gamma: float,
                 optimizer: K.optimizers.Optimizer,
                 n_step: int = 1):

        self.model = ModelWrapper(model, optimizer)
        #self.model.compile()
        self.current_model = None

        self.gamma = gamma
        self.n_step = n_step

        super(DQN, self).__init__(environment=environment,
                                  memory=memory,
                                  policy=policy,
                                  model=model,
                                  optimizer=optimizer,
                                  logger=logger)
Example #10
def generate(args):
    # Load word2idx
    adaptive = ModelWrapper(args, data_loader=get_loader)
    if adaptive.generate(args):
        return 0
Example #11
        shuffle=True,
        collate_fn=data.image_label_list_of_masks_collate_function)
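    # Validation split capped at 6,000 images, used for FID computation; the full val split is built separately below.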
    validation_dataset_fid = DataLoader(
        data.Places365(path_to_index_file=args.path_to_places365,
                       index_file_name='val.txt',
                       max_length=6000,
                       validation=True),
        batch_size=args.batch_size,
        num_workers=args.batch_size,
        shuffle=False,
        collate_fn=data.image_label_list_of_masks_collate_function)
    validation_dataset = data.Places365(
        path_to_index_file=args.path_to_places365, index_file_name='val.txt')
    # Init model wrapper
    model_wrapper = ModelWrapper(
        generator=generator,
        discriminator=discriminator,
        vgg16=vgg16,
        training_dataset=training_dataset,
        validation_dataset=validation_dataset,
        validation_dataset_fid=validation_dataset_fid,
        generator_optimizer=generator_optimizer,
        discriminator_optimizer=discriminator_optimizer)
    # Perform training
    if args.train:
        model_wrapper.train(epochs=args.epochs, device=args.device)
    # Perform testing
    if args.test:
        print('FID=', model_wrapper.validate(device=args.device))
        model_wrapper.inference(device=args.device)
Example #12
    RNN(HIDDEN_SIZE),
    layers.RepeatVector(3),
    RNN(128, return_sequences=True),
    layers.TimeDistributed(layers.Dense(len(CHARS), activation='softmax'))
])

model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
model.summary()

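# Wrap the raw generator with encode_generator so batches are encoded before training;
# the same generator is also reused as validation data.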
train_generator = encode_generator(training_generator, BATCH_SIZE)

hist = model.fit_generator(train_generator,
                           steps_per_epoch=STEPS_PER_EPOCH,
                           epochs=EPOCHS,
                           verbose=1,
                           use_multiprocessing=True,
                           workers=-2,
                           callbacks=callbacks,
                           validation_data=train_generator, validation_steps=30)

score = model.evaluate_generator(encode_generator(
    test_generator, BATCH_SIZE), steps=STEPS_PER_EPOCH)
print(score)

config = build_config(MODEL_NAME, LEARNING_RATE, BATCH_SIZE,
                      EPOCHS, STEPS_PER_EPOCH, score[0], score[1])
wrapper = ModelWrapper(model, config=config)
wrapper.save_model()
Example #13
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time    : 17/11/14 PM2:21
# @Author  : shaoguang.csg
# @File    : main.py

from parse_conf import DataConf, ModelConf
from model_wrapper import ModelWrapper
from utils.logger import logger
from time import time

start = time()

data_conf = DataConf()
model_conf = ModelConf()

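# Train, evaluate, and predict with the wrapped model; the elapsed wall-clock time for the whole run is logged at the end.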
model = ModelWrapper(data_conf=data_conf, model_conf=model_conf)
model.train()
logger.info(model.get_weight())
model.evaluate()
result = model.predict()

end = time()

logger.info('time: {}'.format(end - start))

# 2 cores, 1 thread: 116
# 1 core: 228
Example #14
def main():
    # Grab the dataset from scikit-learn
    data = datasets.load_iris()
    X = data['data']
    y = data['target']
    target_names = data['target_names']
    feature_names = [
        f.replace(' (cm)', '').replace(' ', '_') for f in data.feature_names
    ]
    X_train, X_test, y_train, y_test = train_test_split(X,
                                                        y,
                                                        test_size=0.3,
                                                        random_state=42)
    # Build and train the model
    model = RandomForestClassifier(random_state=101)
    model.fit(X_train, y_train)
    print("Score on the training set is: {:2}".format(
        model.score(X_train, y_train)))
    print("Score on the test set is: {:.2}".format(model.score(X_test,
                                                               y_test)))

    # CHANGES HERE >>>>>>>>>>
    X_mean = X_train.mean(axis=0).round(1)
    feature_defaults = dict(zip(feature_names, X_mean.tolist()))
    wrapped = ModelWrapper(model_name='iris-rf',
                           model_version=MODEL_VERSION,
                           model_object=model,
                           class_labels=target_names.tolist(),
                           feature_defaults=feature_defaults)

    # Save the model
    model_filename = 'iris-rf-v{}.pkl'.format(MODEL_VERSION)
    print("Saving model to {}...".format(model_filename))
    joblib.dump(wrapped, model_filename)

    # ***** Generate test data *****
    print('Generating test data...')
    all_probs = model.predict_proba(X_test)
    all_test_cases = prep_test_cases(X_test, all_probs, feature_names,
                                     target_names)
    test_data_fname = 'testdata_iris_v{}.json'.format(MODEL_VERSION)
    with open(test_data_fname, 'w') as fout:
        json.dump(all_test_cases, fout)

    # ***** Generate test data (Missing) *****
    print('Generating test data with missing values...')
    missing_grps = [(0, ), (1, ), (2, ), (0, 1), (0, 2), (1, 2), (0, 1, 2)]
    X_mean = X_train.mean(axis=0).round(1)
    all_features = []
    all_probs = []
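    # For each group of missing columns, blank them in one copy of X_test and impute the training means
    # in another; the imputed copy yields the expected probabilities.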
    for missing_cols in missing_grps:
        X_missing = X_test.copy().astype('object')
        X_scored = X_test.copy()
        for col in missing_cols:
            X_missing[:, col] = None
            X_scored[:, col] = X_mean[col]
        # Use the imputed one to find expected probabilities
        all_probs.extend(model.predict_proba(X_scored))
        all_features.extend(X_missing)

    all_test_cases_missing = prep_test_cases(all_features, all_probs,
                                             feature_names, target_names)

    test_data_fname = 'testdata_iris_missing_v{}.json'.format(MODEL_VERSION)
    with open(test_data_fname, 'w') as fout:
        json.dump(all_test_cases_missing, fout)
Example #15
    shuffle=True)
validation_dataset = DataLoader(
    CellInstanceSegmentation(path=os.path.join(args.path_to_data, "val"),
                             augmentation_p=0.0, two_classes=not args.three_classes),
    collate_fn=collate_function_cell_instance_segmentation, batch_size=1, num_workers=1, shuffle=False)
test_dataset = DataLoader(
    CellInstanceSegmentation(path=os.path.join(args.path_to_data, "test"),
                             augmentation_p=0.0, two_classes=not args.three_classes),
    collate_fn=collate_function_cell_instance_segmentation, batch_size=1, num_workers=1, shuffle=False)
# Model wrapper
model_wrapper = ModelWrapper(detr=detr,
                             detr_optimizer=detr_optimizer,
                             detr_segmentation_optimizer=detr_segmentation_optimizer,
                             training_dataset=training_dataset,
                             validation_dataset=validation_dataset,
                             test_dataset=test_dataset,
                             loss_function=InstanceSegmentationLoss(
                                 segmentation_loss=SegmentationLoss(),
                                 ohem=args.ohem,
                                 ohem_faction=args.ohem_fraction),
                             device=device)
# Perform training
if args.train:
    model_wrapper.train(epochs=args.epochs,
                        optimize_only_segmentation_head_after_epoch=args.only_train_segmentation_head_after_epoch)
# Perform validation
if args.val:
    model_wrapper.validate(number_of_plots=30)
# Perform testing
if args.test:
    model_wrapper.test()
Example #16
if not os.path.exists(processed_data_dir):
    os.makedirs(processed_data_dir)

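# Reuse the cached .npy file if present; otherwise preprocess the raw training images and cache the result.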
train_set_processed_data_path = os.path.join(processed_data_dir,
                                             'train_{}.npy'.format(img_size))
if os.path.exists(train_set_processed_data_path):
    train_set_data = np.load(train_set_processed_data_path)
    print('Data loaded!')
else:
    train_set_data = process_data.process_train_set_data(
        img_size, train_set_raw_data_dir, train_set_processed_data_path)
    print('Data processed!')

model_wrapper = ModelWrapper(learning_rate,
                             img_size,
                             tensorboard_dir=tensorboard_dir)
model = model_wrapper.model

model_path = os.path.join(model_dir, model_wrapper.name)
if os.path.exists(model_path):
    model.load(os.path.join(model_path, model_wrapper.name))
    print('Model loaded!')

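# Hold out the last 500 examples as a validation split.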
train_set_data, validation_set_data = train_set_data[:-500], train_set_data[-500:]

train_x = np.array([i[0] for i in train_set_data]).reshape(-1, img_size, img_size, 1)
train_y = [i[1] for i in train_set_data]
Example #17
model_list = os.listdir(model_dir)
print('\nFind model:')

for num, model in enumerate(model_list):
    print(num, model)

model_name = model_list[int(input('\nChoose a model (enter a number): '))]
print('Load Model {}'.format(model_name))

model_path = os.path.join(model_dir, model_name)

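# The learning rate and image size are parsed from the chosen model's name (fields 2 and 3 when split on '-').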
learning_rate = float(model_name.split('-')[2])
img_size = int(model_name.split('-')[3])

model = ModelWrapper(learning_rate, img_size).model
model.load(os.path.join(model_path, model_name))
print('Model loaded!')

test_set_processed_data_path = os.path.join(processed_data_dir,
                                            'test_{}.npy'.format(img_size))
if os.path.exists(test_set_processed_data_path):
    test_set_data = np.load(test_set_processed_data_path)
    print('Data loaded!')
else:
    test_set_data = process_data.process_test_set_data(
        img_size, test_set_raw_data_dir, test_set_processed_data_path)
    print('Data processed!')

fig = plt.figure()
Example #18
#
# data
dataset = Dataset()
dataset.load_vocab_tokens_and_emb()
#

#
config = ModelSettings()
config.vocab = dataset.vocab
config.model_tag = model_tag
config.model_graph = build_graph
config.is_train = False
config.check_settings()
#
model = ModelWrapper(config)
model.prepare_for_prediction()
#

text_raw = ["这本书不错"]
"""
work_book = xlrd.open_workbook(file_raw)
data_sheet = work_book.sheets()[0]
text_raw = data_sheet.col_values(0)
"""

#
preds_list = []
logits_list = []
#
for item in text_raw:
Example #19
    shots = args.shots
    token_limit = args.token_limit
    model_type = args.model
    model_size = args.model_size
    data_size = args.data_size
    lr_base = args.lr
    eval_data_size = args.eval_data_size
    eval_batch_size = args.eval_batch_size
    train_batch_size = args.train_batch_size
    local_rank = args.local_rank
    fp16 = args.fp16

    device, n_gpu = setup_device(local_rank)
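    # Select the pretrained checkpoint name based on the requested model family and size.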
    if model_type == "bert":
        lm = ModelWrapper('bert',
                          f'bert-{model_size}-uncased',
                          token_limit=token_limit,
                          device=device)
    elif model_type == "roberta":
        lm = ModelWrapper('roberta',
                          f'roberta-{model_size}',
                          token_limit=token_limit,
                          device=device)
    elif model_type == "longformer":
        lm = ModelWrapper('longformer',
                          f'allenai/longformer-{model_size}-4096',
                          token_limit=token_limit,
                          device=device)
    else:
        raise KeyError(f"model type {model_type} not supported")
    if do_mlm:
        classes = [['yes', 'right'], ['maybe'], ['wrong', 'no']]