Beispiel #1
0
def callback(message):
    """
    Handle a published Pub/Sub message and run the whole pipeline to
    create a model in AutoML.

    params: message - Pub/Sub message whose JSON payload carries the
        train budget ("train_budget") and the session ID ("session_id")
    """
    print(message.data)
    try:
        # Parse inside the try so a malformed payload is caught too
        # (previously json.loads ran before the try and could raise uncaught).
        input_dict = json.loads(message.data)
        db = TinyDB('version_counter.json')
        version = db.all()[0]['version']
        print('version', version)
        train_budget = int(input_dict["train_budget"])
        sess_id = input_dict["session_id"]
        print("Making local dir structure for preprocessing file")
        prefix_local_path = str(create_dir(sess_id, PROJECT_ID))
        print(prefix_local_path)
        print("Entering augmentation")
        _, _ = preprocess_automl(DATASET_NAME, BUCKET_NAME, prefix_local_path,
                                 version, OPTIMAL_AUGMENTATION)
        model_id = make_model(PROJECT_ID, COMPUTE_REGION2, DATASET_NAME,
                              BUCKET_NAME, train_budget, MODEL_NAME_PREFIX,
                              version, sess_id)
        model_id = str(model_id)

        # Only bump the stored version once the pipeline succeeded.
        db.update({'version': version + 1})

    except Exception as error_message:
        print('Error: ', error_message)
    finally:
        # Ack in all cases (the original acked unconditionally up-front);
        # acking last avoids pretending success before the work has run,
        # while still preventing redelivery loops on a permanently bad message.
        message.ack()
Beispiel #2
0
def main():
    """Initialize settings, load and split the data, build the model, train it."""
    # Initialize configuration.
    epochs, learning_rate, use_cuda, device, weight_path, data_path = init()

    # Load and split the data (the splitter returns the raw splits plus
    # ready-made DataLoaders; only the loaders are used below).
    (trn, val, trn_X, trn_y, val_X, val_y,
     trn_loader, val_loader) = load_split.load_splitter(data_path)

    # Build the model.
    model = modeling.make_model(device, weight_path)

    # Train; training() presumably returns the saved-weights path -- TODO confirm.
    save_path = training(use_cuda, learning_rate, epochs, model, trn_loader,
                         val_loader)
def predict(request, input_tablename, output_tablename,
            weight_path=('C:/Users/bong/project/semiconductor_project/'
                         'semiconductor_project/web_server/web/predict/'
                         'test_model_new.pth')):
    """
    Predict the four layer thicknesses for the rows read from the input
    table.

    params:
        request - passed through unchanged to the caller
        input_tablename, output_tablename - table names handed to
            load_data.read_data
        weight_path - model weight file; defaults to the original
            hard-coded location for backward compatibility
    returns: (pred_test, request) where pred_test is a DataFrame with
        columns layer_1..layer_4
    """
    device = 'cuda' if torch.cuda.is_available() else 'cpu'

    # Load the real-time data.
    data_realtime = load_data.read_data(input_tablename, output_tablename)

    # Build the model with the trained weights.
    model = modeling.make_model(device, weight_path)

    # Predict thickness: drop the first and last columns (presumably an
    # index and a label column -- TODO confirm against the table schema),
    # then run the remaining features through the model.
    features = data_realtime.iloc[:, 1:-1]
    inputs = torch.from_numpy(features.astype(float).values).to(device)
    outputs = model(inputs.float()).cpu().detach().numpy()

    pred_test = pd.DataFrame(outputs)
    pred_test.columns = ['layer_1', 'layer_2', 'layer_3', 'layer_4']

    return pred_test, request
Beispiel #4
0
    def __init__(self, args):
        """
        Build the full training state: saver, tensorboard writer,
        dataloaders, model, optimizer, criterion, evaluator, LR
        scheduler, and (optionally) resume from a checkpoint.

        params: args - parsed command-line namespace (workers, lr, cuda,
            gpu_ids, epochs, resume, ft, loss_type, use_weighted_loss, ...)
        raises: RuntimeError if args.resume points to a missing file
        """
        self.args = args

        # Define Saver and persist the experiment configuration.
        self.saver = Saver(args)
        self.saver.save_experiment_config()

        # Define Tensorboard summary writer.
        self.summary = TensorboardSummary(self.saver.experiment_dir)
        self.writer = self.summary.create_summary()

        # Define dataloaders.
        # NOTE(review): self.nclass is read below but never assigned in this
        # method -- verify it is set elsewhere (make_data_loader variants
        # often also return the number of classes).
        kwargs = {'num_workers': args.workers, 'pin_memory': True}
        self.train_loader, self.val_loader, self.test_loader = make_data_loader(
            args, **kwargs)

        # Define network with two LR groups: backbone at 1x, head at 10x.
        model = make_model(args)
        train_params = [{
            'params': model.get_1x_lr_params(),
            'lr': args.lr
        }, {
            'params': model.get_10x_lr_params(),
            'lr': args.lr * 10
        }]

        # Define optimizer (Adam with amsgrad; eps raised to 1e-4).
        optimizer = torch.optim.Adam(train_params,
                                     args.lr,
                                     amsgrad=True,
                                     eps=1e-4)

        # Define criterion; optionally class-balanced weights with the
        # last three classes up-weighted 4x.
        if args.use_weighted_loss:
            class_w_extra_weights = 3
            # Fixed: second reference was `self.n_class` (typo) while every
            # other use in this method is `self.nclass`.
            weight = (1. /
                      (self.nclass + 3 * class_w_extra_weights)) * np.ones(
                          self.nclass, dtype=np.float32)
            weight[-1] *= 4
            weight[-2] *= 4
            weight[-3] *= 4
            weight = torch.from_numpy(weight)
        else:
            weight = None

        self.criterion = SegmentationLosses(
            weight=weight, cuda=args.cuda).build_loss(mode=args.loss_type)
        self.model, self.optimizer = model, optimizer

        # Define evaluator.
        self.evaluator = Evaluator(self.nclass)

        # Define LR scheduler.
        self.scheduler = LR_Scheduler(args.lr_scheduler, args.lr, args.epochs,
                                      len(self.train_loader))

        # Using CUDA: swap the decoder's final conv so the head emits
        # self.nclass channels, then wrap the model in DataParallel.
        if args.cuda:
            num_ftrs = 256
            decoder_modules = list(self.model.decoder.last_conv[:-1])
            self.model.decoder.last_conv = nn.Sequential(
                *decoder_modules, nn.Conv2d(num_ftrs, self.nclass, 1))
            self.model = torch.nn.DataParallel(self.model,
                                               device_ids=self.args.gpu_ids)
            self.model = self.model.cuda()

        # Resume checkpoint if requested.
        self.best_pred = 0.0
        if args.resume is not None:
            if not os.path.isfile(args.resume):
                raise RuntimeError("=> no checkpoint found at '{}'".format(
                    args.resume))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            if args.cuda:
                # DataParallel keeps the real model under .module.
                self.model.module.load_state_dict(checkpoint['state_dict'])
            else:
                self.model.load_state_dict(checkpoint['state_dict'])
            if not args.ft:
                # Fine-tuning starts with a fresh optimizer state.
                self.optimizer.load_state_dict(checkpoint['optimizer'])
            self.best_pred = checkpoint['best_pred']
            print("=> loaded checkpoint '{}' (epoch {})".format(
                args.resume, checkpoint['epoch']))

        # Clear start epoch if fine-tuning.
        if args.ft:
            args.start_epoch = 0