Example #1
    def get_data_manager(self, root_dir=None, data_name='syn#1', seed_num=1):
        '''
        Construct a Data_Manager instance that manages the dataset
        information used by the subsequent processing steps.
        '''
        data_info = data_name.split('#')
        data_kind, data_num = data_info[0], int(data_info[1])
        is_real = not (data_kind.startswith('syn')
                       or data_kind.startswith('2d'))
        has_outliers = data_kind.endswith('otlr')

        if data_kind == '2d':
            self.cls_num, num_samples, num_features, dim_reduced = 3, 1000, 2, False
        elif data_kind == 'tcga':
            self.cls_num, num_samples, num_features, dim_reduced = 33, 11135, 5000, False
        else:
            self.cls_num, num_samples, num_features, dim_reduced = 10, 1000, 2000, False

        data_manager = Data_Manager(root_dir=root_dir, is_real=is_real,
                                    data_kind=data_kind, data_num=data_num,
                                    has_outliers=has_outliers,
                                    dim_reduced=dim_reduced,
                                    num_of_features=num_features,
                                    num_of_samples=num_samples,
                                    num_of_cls=self.cls_num, seed=seed_num)

        return data_manager
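
A minimal usage sketch, assuming the method above belongs to some experiment/driver class; the name `Experiment` below is hypothetical, and only the argument shapes come from the code above:

# Hypothetical usage: 'Experiment' stands in for whatever class owns
# get_data_manager.
exp = Experiment()
dm = exp.get_data_manager(root_dir='./data', data_name='syn#2', seed_num=42)
print(exp.cls_num)  # 10 for a synthetic dataset (the final else branch above)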
Example #2
def export_fmu(model_path, file_name):
    '''Parse signal exchange blocks and export the BOPTEST FMU and KPI JSON.

    Parameters
    ----------
    model_path : str
        Path to the original Modelica model
    file_name : list
        Path(s) to the Modelica file and required libraries not on MODELICAPATH.
        Passed to the file_name parameter of pymodelica.compile_fmu() in JModelica.

    Returns
    -------
    fmu_path : str
        Path to the wrapped Modelica model FMU
    kpi_path : str
        Path to the KPI JSON file

    '''

    # Get signal exchange instances and kpi signals
    instances, signals = parse_instances(model_path, file_name)
    # Write wrapper and export as fmu
    fmu_path, _ = write_wrapper(model_path, file_name, instances)
    # Write kpi json
    kpi_path = os.path.join(os.getcwd(), 'kpis.json')
    with open(kpi_path, 'w') as f:
        json.dump(signals, f)

    # Open the FMU and save the KPI JSON inside it
    man = Data_Manager()
    man.save_data_and_kpisjson(fmu_path=fmu_path)

    return fmu_path, kpi_path
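
A hedged usage sketch; the model path and file list below are illustrative placeholders, not values from the source:

# Illustrative call: the model path and file names are placeholders.
fmu_path, kpi_path = export_fmu(
    'wrapped.TestCase',            # hypothetical Modelica model path
    ['TestCase.mo', 'MyLib.mo'])   # hypothetical model and library files
print(fmu_path, kpi_path)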
Example #3
def load_checkpoint(dir_name, lr, HAS_CUDA):
    # load model parameters

    if HAS_CUDA:
        device = torch.cuda.current_device()
        print(f'Cuda device: {device}')
        checkpoint = torch.load(
            os.path.join(dir_name, 'checkpoint.pth.tar'),
            map_location=lambda storage, loc: storage.cuda(device))
        print('Loaded CUDA version')
    else:
        checkpoint = torch.load(os.path.join(dir_name, 'checkpoint.pth.tar'),
                                map_location='cpu')

    # load pretrained model
    pt_model = checkpoint['pt_model']
    model_pt = models.__dict__[pt_model](pretrained=True)

    # Recreate model
    img_cl = Composite_Classifier(model_pt, checkpoint['n_hid'],
                                  checkpoint['drops'], checkpoint['num_cat'])

    # load model state dictionary
    img_cl.load_state_dict(checkpoint['model'])

    # Recreate optimiser; the initial lr is a placeholder that is
    # overwritten when the optimizer state is loaded from the checkpoint
    optimizer_ft = optim.SGD(img_cl.cf_layers.parameters(),
                             lr=0.001,
                             momentum=0.9)
    optimizer_ft.load_state_dict(checkpoint['optimizer'])
    old_lr = optimizer_ft.param_groups[0]['lr']
    last_epoch_trained_upon = checkpoint['epochs']
    exp_lr_scheduler = lr_scheduler.StepLR(optimizer_ft,
                                           step_size=7,
                                           gamma=0.1)

    # Now move the optimizer state to the GPU if necessary
    if HAS_CUDA:
        for state in optimizer_ft.state.values():
            for k, v in state.items():
                if isinstance(v, torch.Tensor):
                    state[k] = v.to(device)
    if old_lr != lr:
        optimizer_ft.param_groups[0]['lr'] = lr
    else:
        # if lr has not been updated put scheduler back to where it was
        exp_lr_scheduler.last_epoch = last_epoch_trained_upon

    # Recreate data object
    data = Data_Manager(checkpoint['data_dir'], checkpoint['phases'],
                        checkpoint['data_tfms'], checkpoint['bs'])

    # Recreate model manager class instance
    phases = checkpoint['phases']
    model_mgr = Solution_Manager(img_cl, checkpoint['loss_function'],
                                 optimizer_ft, exp_lr_scheduler, data, phases,
                                 HAS_CUDA)

    # restore model manager state variables
    model_mgr.epochs = checkpoint['epochs']
    model_mgr.loss_function = checkpoint['loss_function']
    model_mgr.best_accuracy = checkpoint['best_accuracy']
    model_mgr.best_corrects = checkpoint['best_corrects']
    model_mgr.best_loss = checkpoint['best_loss']
    model_mgr.model.class_to_idx = checkpoint['class_to_idx']

    if HAS_CUDA:
        model_mgr.model.cuda()

    # Freeze the pre-trained model layers
    for param in img_cl.model_pt.parameters():
        param.requires_grad = False

    print('Checkpoint loaded')
    return model_mgr, pt_model
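
A usage sketch, assuming a checkpoint directory produced by the matching save_checkpoint; the directory name below is illustrative:

# Illustrative call: './checkpoints' is a placeholder directory.
HAS_CUDA = torch.cuda.is_available()
model_mgr, pt_model = load_checkpoint('./checkpoints', lr=0.01,
                                      HAS_CUDA=HAS_CUDA)
print('Resumed {} after {} epochs'.format(pt_model, model_mgr.epochs))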
Example #4
def main():

    global args
    args = parser.parse_args()

    # Default settings
    default_model = 'resnet18'
    default_bs = 16
    sz = 224

    # Take actions based upon initial arguments

    if args.gpu:
        # Check for GPU and CUDA libraries
        HAS_CUDA = torch.cuda.is_available()
        if not HAS_CUDA:
            sys.exit('No Cuda capable GPU detected')
    else:
        HAS_CUDA = False

    checkpoint_dir = args.save_dir

    # Define hyper-parameters

    # Note - allow dropout to be changed when resuming model

    tmp = args.dropout
    tmp = re.sub("[\[\]]", "", tmp)
    drops = [float(item) for item in tmp.split(',')]

    lr = args.learning_rate

    epochs = args.epochs

    # All arguments parsed; set up the model either from a checkpoint or
    # from scratch

    if args.resume:
        if os.path.isdir(args.resume):
            print('Loading checkpoint...')
            sol_mgr, pt_model = utility.load_checkpoint(
                args.resume, lr, HAS_CUDA)
        else:
            sys.exit('Checkpoint directory does not exist')

    else:
        # Define hidden layer details (note: if resuming, training continues
        # with the values used earlier)

        tmp = args.hidden_units
        tmp = re.sub(r"[\[\]]", "", tmp)
        n_hid = [int(item) for item in tmp.split(',')]

        # Check the data directory exists and assign it
        data_dir = args.data_directory
        if not os.path.exists(data_dir):
            sys.exit('Data directory does not exist')

        # Create the model, datasets and dataloaders from scratch
        phases = ['train', 'valid', 'test']

        # Define data transforms
        data_transforms = {
            'train':
            transforms.Compose([
                transforms.RandomResizedCrop(sz),
                transforms.RandomHorizontalFlip(),
                transforms.ToTensor(),
                transforms.Normalize([0.485, 0.456, 0.406],
                                     [0.229, 0.224, 0.225])
            ]),
            'valid':
            transforms.Compose([
                transforms.Resize(256),
                transforms.CenterCrop(sz),
                transforms.ToTensor(),
                transforms.Normalize([0.485, 0.456, 0.406],
                                     [0.229, 0.224, 0.225])
            ]),
            'test':
            transforms.Compose([
                transforms.Resize(256),
                transforms.CenterCrop(sz),
                transforms.ToTensor(),
                transforms.Normalize([0.485, 0.456, 0.406],
                                     [0.229, 0.224, 0.225])
            ]),
        }

        bs = args.batch_size

        data = Data_Manager(data_dir, phases, data_transforms, bs)

        # Load cat_to_name
        cat_to_name = utility.load_classes('cat_to_name.json')
        num_cat = len(cat_to_name)

        # Load pre-trained model
        if args.arch is not None:
            pt_model = args.arch
        else:
            pt_model = default_model
        model_pt = models.__dict__[pt_model](pretrained=True)
        num_ftrs = model_pt.fc.in_features

        # Create classifier model
        img_cl = Composite_Classifier(model_pt, n_hid, drops, num_cat)
        # Move to CUDA if available
        if HAS_CUDA:
            img_cl.cuda()

        # Define losses and hyper-parameters
        criterion = nn.CrossEntropyLoss()
        # Optimise just the parameters of the classifier layers
        optimizer_ft = optim.SGD(img_cl.cf_layers.parameters(),
                                 lr=lr,
                                 momentum=0.9)

        exp_lr_scheduler = lr_scheduler.StepLR(optimizer_ft,
                                               step_size=7,
                                               gamma=0.1)

        # Freeze the pre-trained model layers
        for param in img_cl.model_pt.parameters():
            param.requires_grad = False

        # Create model manager to control training, validation, test and predict with the model and data
        sol_mgr = Solution_Manager(img_cl,
                                   criterion,
                                   optimizer_ft,
                                   exp_lr_scheduler,
                                   data,
                                   phases,
                                   HAS_CUDA=HAS_CUDA)
        sol_mgr.model.class_to_idx = data.image_datasets['train'].class_to_idx

    # Train model
    sol_mgr.train(epochs=epochs)

    # Evaluate model against test set
    sol_mgr.test_with_dl()

    # Save Checkpoint
    utility.save_checkpoint(args.save_dir, sol_mgr, pt_model, HAS_CUDA)
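
The excerpt ends without an entry-point guard; a conventional one (assuming the argparse `parser` used above is defined at module level) would be:

if __name__ == '__main__':
    main()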
Example #5
    def __init__(self,
                 board_dim=8,
                 time_steps=1,
                 n_filters=256,
                 conv_size=3,
                 n_res=40,
                 c=.1):
        """
        :param board_dim: dimension of game board
        :param time_steps: number of time steps kept in state history
        :param n_filters: number of convolutional filters per conv layer
        :param conv_size: size of convolutions
        :param n_res: number of residual layers
        :param c: regularization scale constant
        """
        self.board_dim = board_dim
        self.time_steps = time_steps
        self.losses = None
        self.n_conv_filters = n_filters
        self.conv_size = conv_size
        self.n_res_layers = n_res
        self.regularizer = tf.contrib.layers.l2_regularizer(scale=c)
        self.dm = Data_Manager(max_size=(board_dim**2 - 4) *
                               500)  # moves per game TIMES num games to save

        # --------------
        # Make Network
        # --------------

        with tf.Graph().as_default() as net1_graph:
            self.input_layer = tf.placeholder(shape=[
                None, self.board_dim, self.board_dim, (self.time_steps * 2 + 1)
            ],
                                              dtype=tf.float32,
                                              name='input')
            self.net = self._add_conv_layer(self.input_layer, name='conv1')
            for i in range(self.n_res_layers):
                self.net = self._add_res_layer(self.net,
                                               name='res{}'.format(i + 1))

            self.policy_logits = self._policy_head(self.net)
            self.value_estimate = self._value_head(self.net)

            self.mcts_pi = tf.placeholder(
                shape=[None, (self.board_dim**2 + 1)],
                dtype=tf.float32,
                name='pi')
            self.winner_z = tf.placeholder(shape=[None, 1],
                                           dtype=tf.float32,
                                           name='z')

            # Loss: value MSE plus policy cross entropy plus L2 regularization.
            # softmax_cross_entropy_with_logits already returns the negative
            # log-likelihood term, so all three pieces are summed.
            xent = tf.nn.softmax_cross_entropy_with_logits(
                labels=self.mcts_pi, logits=self.policy_logits)
            mse = tf.losses.mean_squared_error(self.winner_z,
                                               self.value_estimate)
            reg_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
            self.loss = mse + tf.reduce_mean(xent) + sum(reg_losses)

            self.optimizer = tf.train.AdamOptimizer().minimize(
                self.loss)  # tune learning rate

            # more ops
            self.init_op = tf.global_variables_initializer()
            self.saver = tf.train.Saver()

        # initialize session
        self.sess = tf.Session(graph=net1_graph)
        self.sess.run(self.init_op)
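
A construction sketch, assuming the __init__ above belongs to a network class; the name `AlphaZeroNet` below is hypothetical:

# Hypothetical class name; the keyword arguments mirror the signature above.
net = AlphaZeroNet(board_dim=8, time_steps=1, n_filters=256,
                   conv_size=3, n_res=40, c=.1)
# __init__ already creates and initializes the session, so inference is e.g.:
# net.sess.run(net.policy_logits, feed_dict={net.input_layer: batch})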
Example #6
from agent import Agent
import visualizer
# Data_Manager's module is not shown in the excerpt; the path below is assumed
from data_manager import Data_Manager

visualizer.init_visualizer()

WINDOW_SIZE = 60
BATCH_SIZE = 30
EPISODE = 8
LEARNING_RATE = 0.001
VALIDATION = 0  # fraction of the training data to use as validation data
ENSEMBLE_NUM = 16
USE_TOP_N_AGENT = ENSEMBLE_NUM // 3

ROLLING_TRAIN_TEST = False

# Configure the train/test data
dm = Data_Manager('./gaps.db', 20151113, 20180615, split_ratio=(0.6, 0.2, 0.2))
df = dm.load_db()
train_df, val_df, test_df = dm.generate_feature_df(df, WINDOW_SIZE)
print('train: {} ~ {}'.format(train_df.iloc[0].name, train_df.iloc[-1].name))
print('val  : {} ~ {}'.format(val_df.iloc[WINDOW_SIZE].name,
                              val_df.iloc[-1].name))
print('test: {} ~ {}'.format(test_df.iloc[WINDOW_SIZE].name,
                             test_df.iloc[-1].name))
print("데이터 수 train: {}, val: {}, test: {}".format(len(train_df), len(val_df),
                                                  len(test_df)))

visualizer.plot_dfs(
    [train_df, val_df.iloc[WINDOW_SIZE:], test_df.iloc[WINDOW_SIZE:]],
    ['train', 'val', 'test'])
print("학습 데이터의 asset 개수 : ", len(train_df.columns.levels[0]))