Example #1
    def __init__(self, logging_path, C=2.5, gamma=0.001, threshold=1e-12, p_poly=1):
        self.logging_path = logging_path
        initialize_logger(logging_path)

        self.C = C
        self.gamma = gamma

        self.data_path = join(getcwd(), 'project2', 'Data')
        self.process_dataset()
        self.train_len = len(self.train_x)

        self.lower_bound = np.zeros((len(self.train_x), 1))
        self.upper_bound = np.full((len(self.train_x), 1), C)

        self.threshold = threshold
        self.p_poly = p_poly

        k_poly = self.poly_kernel(self.train_x, self.train_x, self.p_poly)

        y1 = np.eye(self.train_len) * self.train_y2
        y4 = np.eye(self.train_len) * self.train_y4
        y6 = np.eye(self.train_len) * self.train_y6

        self.P1 = np.dot((np.dot(y1, k_poly)), y1)
        self.P4 = np.dot((np.dot(y4, k_poly)), y4)
        self.P6 = np.dot((np.dot(y6, k_poly)), y6)

        self.h = np.concatenate([self.lower_bound, self.upper_bound], 0)
        self.e = -1 * np.ones((self.train_len, 1))
        self.Gp, self.Gn = np.eye(self.train_len), -np.eye(self.train_len)
        self.G = np.concatenate([self.Gn, self.Gp], 0)

        print(f'C = {self.C}, gamma = {self.gamma}')
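
The matrices built above (P1/P4/P6 = Y K Y, the linear term e, and the box constraints 0 <= alpha <= C encoded by G and h) are exactly the pieces of the soft-margin SVM dual. A minimal sketch of how they could be handed to a QP solver follows; the use of cvxopt and the helper name solve_dual are assumptions, not part of the original class.

import numpy as np
from cvxopt import matrix, solvers

def solve_dual(P, e, G, h, y):
    """Minimize 1/2 a^T P a + e^T a  s.t.  G a <= h  and  y^T a = 0 (assumed helper)."""
    A = matrix(y.reshape(1, -1).astype(np.double))   # equality constraint y^T a = 0
    b = matrix(0.0)
    sol = solvers.qp(matrix(P), matrix(e), matrix(G), matrix(h), A, b)
    return np.array(sol['x']).ravel()                # optimal dual variables alpha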
Example #2
    def __init__(self, path, noise_dim=100, input_dim=(28, 28, 1),
                 optimizer='adam_beta', batch_size=128, visualize=True):
        initialize_logger()
        configure_tf()
        self.path = path

        self.name = 'gan'
        self.input_dim = input_dim
        self.z_dim = noise_dim

        self.batch_size = batch_size
        self.train_x, self.train_y = load_mnist()
        logging.info('Dataset is loaded')

        self.optimizer = optimizer
        self.discriminator_lr = 1e-5
        self.generator_lr = self.discriminator_lr / 2

        self.weight_initialization = RandomNormal(mean=0., stddev=0.02, seed=0)
        self.epochs = 6000
        self.sample_every_n_steps = 200
        self.discriminator_losses, self.generator_losses = [], []

        self._build_discriminator_network()
        logging.info('Discriminator model is built')
        self._build_generator_network()
        logging.info('Generator model is built')
        self._build_adversarial_network()
        logging.info('GAN is built')

        if visualize:
            self.model.summary()
            self.generator.summary()
            self.discriminator.summary()
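
A minimal sketch of what _build_adversarial_network typically does in a Keras GAN of this shape: freeze the discriminator and stack it on the generator. The layer wiring and the choice of Adam with beta_1=0.5 (hinted at by the 'adam_beta' optimizer string) are assumptions.

from tensorflow.keras.layers import Input
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam

def _build_adversarial_network(self):
    self.discriminator.trainable = False             # only the generator is updated here
    z = Input(shape=(self.z_dim,))                   # latent noise vector
    validity = self.discriminator(self.generator(z))
    self.model = Model(z, validity, name=self.name)
    self.model.compile(optimizer=Adam(self.generator_lr, beta_1=0.5),
                       loss='binary_crossentropy')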
Example #3
def main():
    """ Parse Lighthouse JSON for failing elements and capture images of those elements """
    args = get_args()
    input_file = args.input_file
    assets_dir = args.assets_dir
    output_dir = os.path.dirname(args.input_file)
    sleep = args.sleep
    initialize_logger('capture', output_dir)
    logging.info('Starting image creation...')
    if args.driver == 'firefox':
        driver = get_firefox_driver()
    elif args.driver == 'chrome':
        driver = get_chrome_driver()
    else:
        raise ValueError("Driver must be one of: firefox, chrome")
    try:
        with open(input_file, encoding='utf-8') as json_file:
            data = json.load(json_file)
            detect_full_html_loaded(driver)
            capture_screenshot(assets_dir, data['finalUrl'], sleep, driver)
        for sel in identifier_generator(
                data, 'color-contrast', 'link-name', 'button-name',
                'image-alt', 'input-image-alt', 'label', 'accesskeys',
                'frame-title', 'duplicate-id', 'list', 'listitem',
                'definition-list', 'dlitem', 'aria-allowed-attr',
                'aria-required-attr', 'aria-required-children',
                'aria-required-parent', 'aria-roles', 'aria-valid-attr-value',
                'aria-valid-attr'):
            capture_element_pic(input_file, assets_dir, data['finalUrl'], sel,
                                sleep, driver)
    finally:
        driver.quit()
        logging.info('Image creation complete in: "' + assets_dir + '"')
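
initialize_logger is the helper shared by these CLI scripts. A minimal sketch of the file-plus-console variant used here (a named log file inside output_dir); the real implementation may differ.

import logging
import os

def initialize_logger(name, output_dir='.'):
    logging.basicConfig(
        level=logging.INFO,
        format='%(asctime)s %(levelname)s %(message)s',
        handlers=[logging.StreamHandler(),                                        # to screen
                  logging.FileHandler(os.path.join(output_dir, name + '.log'))])  # to file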
Example #4
def main():
    """ Parse Lighthouse JSON and convert to Markdown """
    args = get_args()
    input_file = args.input_file
    output_file = args.output_file
    output_dir = os.path.dirname(args.input_file)
    paths = list()
    if args.user_template_path:
        user_template_path = args.user_template_path
        paths.append(user_template_path)
    else:
        paths.append(template_path)
    loader = jinja2.FileSystemLoader(paths)

    env = jinja2.Environment(loader=loader)

    template = loader.load(env, 'index.md')

    rendered = template.render({
        'data': preprocess_data(read_input(input_file)),
        'generate_img_filename': generate_img_filename,
    })

    write_output(args.output_file,
                 rendered,
                 force_stdout=args.e or not output_file)
    initialize_logger('markdown', output_dir)
    logging.info('Markdown conversion complete in: ' + args.output_file)
Example #5
def main():
    input_dim = 6
    spatial_dims = [0, 1, 2]
    args = utils.read_args()

    experiment_dir = utils.get_experiment_dir(args.name, args.run)
    utils.initialize_experiment_if_needed(experiment_dir, args.evaluate)
    # Logger will print to stdout and logfile
    utils.initialize_logger(experiment_dir)

    # Optionally restore arguments from previous training
    # Useful if training is interrupted
    if not args.evaluate:
        try:
            args = utils.load_args(experiment_dir)
        except Exception:
            args.best_tpr = 0.0
            args.nb_epochs_complete = 0  # Track in case training interrupted
            utils.save_args(experiment_dir, args)  # Save initial args

    net = utils.create_or_restore_model(experiment_dir, args.nb_hidden,
                                        args.nb_layer, input_dim, spatial_dims)
    if torch.cuda.is_available():
        net = net.cuda()
        logging.warning("Training on GPU")
        logging.info("GPU type:\n{}".format(torch.cuda.get_device_name(0)))
    criterion = nn.functional.binary_cross_entropy
    if not args.evaluate:
        assert args.train_file is not None
        assert args.val_file is not None
        train_loader = construct_loader(args.train_file,
                                        args.nb_train,
                                        args.batch_size,
                                        shuffle=True)
        valid_loader = construct_loader(args.val_file, args.nb_val,
                                        args.batch_size)
        logging.info("Training on {} samples.".format(
            len(train_loader) * args.batch_size))
        logging.info("Validate on {} samples.".format(
            len(valid_loader) * args.batch_size))
        train(net, criterion, args, experiment_dir, train_loader, valid_loader)

    # Perform evaluation over test set
    try:
        net = utils.load_best_model(experiment_dir)
        logging.warning("\nBest model loaded for evaluation on test set.")
    except Exception:
        logging.warning(
            "\nCould not load best model for test set. Using current.")
    assert args.test_file is not None
    test_loader = construct_loader(args.test_file, args.nb_test,
                                   args.batch_size)
    test_stats = evaluate(net, criterion, experiment_dir, args, test_loader,
                          TEST_NAME)
Example #6
def main():
    """ Pass arguments, check csv validity, and add hash """
    args = get_args()
    input_file = args.input_file
    output_file = args.output_file
    initialize_logger('add_hash')
    try:
        if check_header(input_file):
            add_hash(input_file, output_file)
    except Exception as ex:
        logging.error(ex)
Example #7
def main():
    """ Pass in arguments """
    args = get_args()
    new = args.new
    old = args.old
    diff = args.diff
    initialize_logger('add_hash')
    try:
        if check_header(old) and check_header(new):
            identify_diffs(old, new, diff)
    except Exception as ex:
        logging.error(ex)
Example #8
    def __init__(self, logging_path, C=2.5, gamma=0.01):
        initialize_logger(logging_path)
        self.C = C
        self.gamma = gamma
        self.logging_path = logging_path
        self.data_path = os.path.join(os.getcwd(), 'project2', 'Data')
        self.train_x, self.train_y, self.test_x, self.test_y = load_mnist(
            self.data_path, kind='train')
        logging.info('Dataset is loaded.')
        self.bias = 0
        self.hessian_mat = np.zeros(shape=(self.train_y.shape[0],
                                           self.train_y.shape[0]))
        self.e = -1. * np.ones(shape=(self.train_y.shape[0], 1))
        self.lambda_star = None

        print(f'C = {self.C}, gamma = {self.gamma}')
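
This constructor only allocates hessian_mat; a minimal sketch of how it would be filled for the RBF kernel, H[i, j] = y_i * y_j * exp(-gamma * ||x_i - x_j||^2), is shown below. The helper name is an assumption.

import numpy as np

def rbf_hessian(train_x, train_y, gamma):
    sq_norms = np.sum(train_x ** 2, axis=1)
    sq_dists = sq_norms[:, None] + sq_norms[None, :] - 2.0 * train_x @ train_x.T
    gram = np.exp(-gamma * sq_dists)             # RBF kernel matrix K
    return np.outer(train_y, train_y) * gram     # element-wise y_i y_j K_ij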
Example #9
def main():
    """ Pass arguments, check csv validity, and add hash """
    args = get_args()
    input_file = args.input_file
    output_file = args.output_file
    output_dir = os.path.dirname(args.input_file)
    initialize_logger('add_hash', output_dir)
    if output_file == "-":
        # yes_or_no("Are you sure you want to add hashes to the '%s' file? (keeping a backup is recommended)" % (input_file))
        output_file = os.path.join(tempfile.gettempdir(), 'tmp.csv')
    try:
        if check_header(input_file, ['url'], ['hash']):
            add_hash(input_file, output_file)
            os.remove(input_file)
            os.rename(output_file, input_file)

    except Exception as ex:
        logging.error(ex)
Example #10
def main():
    """ Pass in arguments """
    args = get_args()
    new = args.new
    old = args.old
    diff = args.diff
    output_dir = os.path.dirname(args.new)
    initialize_logger('identify_diffs', output_dir)

    if check_header(old, ['url', 'hash', 'comply'], []) and check_header(
            new, ['url', 'hash', 'comply'], ['diff']):
        try:
            identify_diffs(old, new, diff)
            if diff == "-":
                # yes_or_no("Are you sure you want to add the data to the existing '%s' file? (keeping a backup is recommended)" % (input_file))
                os.remove(new)
                os.rename(diff, new)
                logging.info("Updated '%s' file with diff" % (new))
            else:
                logging.info("Created new '%s' file with diff" % (new))
        except Exception as ex:
            logging.error(ex)
Example #11
def main():

    cudnn.benchmark = True
    base_path = '/NSL/data/images/HyperspectralImages/ICVL/'
    # Dataset
    val_data = DatasetFromHdf5(base_path + '/testclean_si50_st80.h5')
    print(len(val_data))

    # Data Loader (Input Pipeline)
    val_loader = DataLoader(dataset=val_data,
                            num_workers=1,
                            batch_size=1,
                            shuffle=False,
                            pin_memory=True)

    # Model
    model_path = base_path + 'hscnn_5layer_dim10_93.pkl'
    result_path = base_path + '/test_results/'
    var_name = 'rad'

    save_point = torch.load(model_path)
    model_param = save_point['state_dict']
    model = resblock(conv_relu_res_relu_block, 16, 3, 31)
    model = nn.DataParallel(model)
    model.load_state_dict(model_param)

    model = model.cuda()
    model.eval()

    model_path = base_path
    if not os.path.exists(model_path):
        os.makedirs(model_path)
    loss_csv = open(os.path.join(model_path, 'loss.csv'), 'w+')

    log_dir = os.path.join(model_path, 'train.log')
    logger = initialize_logger(log_dir)

    test_loss = validate(val_loader, model, rrmse_loss)

    print("Test Loss: %.9f " % (test_loss))
    # save loss
    record_loss(loss_csv, test_loss)
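
rrmse_loss appears throughout these spectral-reconstruction examples; a common definition in such repositories is the mean relative absolute error shown below, but the exact formula used in this project is an assumption.

import torch

def rrmse_loss(outputs, label):
    error = torch.abs(outputs - label) / label   # per-element relative error
    return torch.mean(error.view(-1))            # averaged over all elements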
Example #12
# histogram equalization
hist_eq = False

# pre-train
pre_train = False
#%%

########## log
path2logs = './output/logs/'
now = datetime.datetime.now()
info = 'log_trainval_'
suffix = info + '_' + now.strftime("%Y-%m-%d-%H-%M")
# Direct the output to a log file and to screen
loggerFileName = os.path.join(path2logs, suffix + '.txt')
utils.initialize_logger(loggerFileName)


#%%

# random data generator
datagen = ImageDataGenerator(featurewise_center=False,
        samplewise_center=False,
        rescale=1/255.,
        #preprocessing_function=preprocess_input,
        featurewise_std_normalization=False,
        samplewise_std_normalization=False,
        zca_whitening=False,
        rotation_range=45,
        width_shift_range=0.1,
        height_shift_range=0.1,
Example #13
    # Hyper-parameters
    parser.add_argument("--window", type=int, default=10)

    # Training vars
    parser.add_argument("--epochs", type=int, default=10)
    parser.add_argument("--sample", type=float, default=1e-5)

    params = vars(parser.parse_args())
    return params


if __name__ == "__main__":
    np.random.seed(0)

    initialize_logger()

    params = parse_arg()

    (file_path, train_o_matic_file_path, sew_dir_path, output_file_path,
     mapping_file, filtered_corpus_output, model_path,
     tab_file_path) = directory_variables()

    sentences = parse_datasets(file_path, train_o_matic_file_path,
                               sew_dir_path, output_file_path, mapping_file)

    w2v_model = build_model(sentences, params["window"], params["sample"])

    w2v_model = train_multiple_corpus(sew_dir_path, w2v_model, tab_file_path,
                                      params)
Example #14
    desc = """Script performs NBA stats searching."""
    parser = argparse.ArgumentParser(description=desc)
    parser.add_argument("-f",
                        "--file",
                        default=False,
                        required=True,
                        help="Input file with NBA data.")
    args = parser.parse_args()
    path_to_file = args.file
    # Does the data file exist?
    if not os.path.exists(path_to_file):
        LOG.error("No file with data.")
        sys.exit(1)
    return path_to_file


def main(fpath):
    """test"""
    LOG.info("Starting script")
    LOG.debug(fpath)
    # To fix the break line verification in the end of csv file on start.
    # Uncomment line below
    # add_csv_string(fpath)
    statwork = NBAStats(fpath, LOG)
    statwork.main()


if __name__ == '__main__':
    LOG = initialize_logger("logs", "nba", "INFO")
    sys.exit(main(parse_arg()))
Example #15
def main():

    cudnn.benchmark = True
    # Dataset
    train_data = DatasetFromHdf5('./Data/train_Material_.h5')
    print(len(train_data))
    val_data = DatasetFromHdf5('./Data/valid_Material_.h5')
    print(len(val_data))

    # Data Loader (Input Pipeline)
    train_data_loader = DataLoader(dataset=train_data,
                                   num_workers=1,
                                   batch_size=64,
                                   shuffle=True,
                                   pin_memory=True)
    val_loader = DataLoader(dataset=val_data,
                            num_workers=1,
                            batch_size=1,
                            shuffle=False,
                            pin_memory=True)

    # Model

    model = resblock(conv_bn_relu_res_block, 10, 25, 25)
    if torch.cuda.device_count() > 1:
        model = nn.DataParallel(model)
    if torch.cuda.is_available():
        model.cuda()

    # Parameters, Loss and Optimizer
    start_epoch = 0
    end_epoch = 100
    init_lr = 0.0001
    iteration = 0
    record_test_loss = 1000
    # criterion_RRMSE = torch.nn.L1Loss()
    criterion_RRMSE = rrmse_loss
    criterion_Angle = Angle_Loss
    criterion_MSE = torch.nn.MSELoss()
    criterion_SSIM = pytorch_msssim.SSIM()
    # criterion_Div = Divergence_Loss
    criterion_Div = torch.nn.KLDivLoss()
    optimizer = torch.optim.Adam(model.parameters(),
                                 lr=init_lr,
                                 betas=(0.9, 0.999),
                                 eps=1e-08,
                                 weight_decay=0.01)

    model_path = './models/'
    if not os.path.exists(model_path):
        os.makedirs(model_path)
    loss_csv = open(os.path.join(model_path, 'loss_material.csv'), 'w+')

    log_dir = os.path.join(model_path, 'train_material.log')
    logger = initialize_logger(log_dir)

    # Resume
    resume_file = ''
    if resume_file:
        if os.path.isfile(resume_file):
            print("=> loading checkpoint '{}'".format(resume_file))
            checkpoint = torch.load(resume_file)
            start_epoch = checkpoint['epoch']
            iteration = checkpoint['iter']
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])

    for epoch in range(start_epoch + 1, end_epoch):

        start_time = time.time()
        train_loss, iteration, lr = train(train_data_loader, model,
                                          criterion_MSE, criterion_RRMSE,
                                          criterion_Angle, criterion_SSIM,
                                          criterion_Div, optimizer, iteration,
                                          init_lr, end_epoch, epoch)
        test_loss, loss_angle, loss_reconstruct, loss_SSIM, loss_Div = validate(
            val_loader, model, criterion_MSE, criterion_RRMSE, criterion_Angle,
            criterion_SSIM, criterion_Div)

        # xxx_loss = validate_save(val_loader, model, criterion_MSE, criterion_RRMSE, epoch)

        save_checkpoint_material(model_path, epoch, iteration, model,
                                 optimizer)

        # print loss
        end_time = time.time()
        epoch_time = end_time - start_time
        print(
            "Epoch [%d], Iter[%d], Time:%.9f, learning rate : %.9f, Train Loss: %.9f Test Loss: %.9f , Angle Loss: %.9f, Recon Loss: %.9f, SSIM Loss: %.9f ,  Div Loss: %.9f"
            % (epoch, iteration, epoch_time, lr, train_loss, test_loss,
               loss_angle, loss_reconstruct, loss_SSIM, loss_Div))

        # save loss
        record_loss(loss_csv, epoch, iteration, epoch_time, lr, train_loss,
                    test_loss)
        logger.info(
            "Epoch [%d], Iter[%d], Time:%.9f, learning rate : %.9f, Train Loss: %.9f Test Loss: %.9f, Angle Loss: %.9f, Recon Loss: %.9f, SSIM Loss: %.9f,  Div Loss: %.9f "
            % (epoch, iteration, epoch_time, lr, train_loss, test_loss,
               loss_angle, loss_reconstruct, loss_SSIM, loss_Div))
Example #16
def main():
    cudnn.benchmark = True

    # load dataset
    print("\nloading dataset ...")
    train_data1 = HyperDatasetTrain1(mode='train')
    train_data2 = HyperDatasetTrain2(mode='train')
    train_data3 = HyperDatasetTrain3(mode='train')
    train_data4 = HyperDatasetTrain4(mode='train')
    print("Train1:%d,Train2:%d,Train3:%d,Train4:%d," % (
        len(train_data1),
        len(train_data2),
        len(train_data3),
        len(train_data4),
    ))
    val_data = HyperDatasetValid(mode='valid')
    print("Validation set samples: ", len(val_data))
    # Data Loader (Input Pipeline)
    train_loader1 = DataLoader(dataset=train_data1,
                               batch_size=opt.batchSize,
                               shuffle=True,
                               num_workers=2,
                               pin_memory=True,
                               drop_last=True)
    train_loader2 = DataLoader(dataset=train_data2,
                               batch_size=opt.batchSize,
                               shuffle=True,
                               num_workers=2,
                               pin_memory=True,
                               drop_last=True)
    train_loader3 = DataLoader(dataset=train_data3,
                               batch_size=opt.batchSize,
                               shuffle=True,
                               num_workers=2,
                               pin_memory=True,
                               drop_last=True)
    train_loader4 = DataLoader(dataset=train_data4,
                               batch_size=opt.batchSize,
                               shuffle=True,
                               num_workers=2,
                               pin_memory=True,
                               drop_last=True)
    train_loader = [train_loader1, train_loader2, train_loader3, train_loader4]
    val_loader = DataLoader(dataset=val_data,
                            batch_size=1,
                            shuffle=False,
                            num_workers=2,
                            pin_memory=True)

    # model
    print("\nbuilding models_baseline ...")
    model = AWAN(3, 31, 200, 8)
    print('Parameters number is ',
          sum(param.numel() for param in model.parameters()))
    criterion_train = LossTrainCSS()
    criterion_valid = Loss_valid()
    if torch.cuda.device_count() > 1:
        model = nn.DataParallel(model)  # batchsize integer times
    if torch.cuda.is_available():
        model.cuda()
        criterion_train.cuda()
        criterion_valid.cuda()

    # Parameters, Loss and Optimizer
    start_epoch = 0
    iteration = 0
    record_val_loss = 1000
    optimizer = optim.Adam(model.parameters(),
                           lr=opt.init_lr,
                           betas=(0.9, 0.999),
                           eps=1e-08,
                           weight_decay=0)

    # visualization
    if not os.path.exists(opt.outf):
        os.makedirs(opt.outf)
    loss_csv = open(os.path.join(opt.outf, 'loss.csv'), 'a+')
    log_dir = os.path.join(opt.outf, 'train.log')
    logger = initialize_logger(log_dir)

    # Resume
    # resume_file = opt.outf + '/net_10epoch.pth'
    resume_file = ''
    if resume_file:
        if os.path.isfile(resume_file):
            print("=> loading checkpoint '{}'".format(resume_file))
            checkpoint = torch.load(resume_file)
            start_epoch = checkpoint['epoch']
            iteration = checkpoint['iter']
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])

    # start epoch
    for epoch in range(start_epoch + 1, opt.end_epoch):
        start_time = time.time()
        train_loss, iteration, lr = train(train_loader, model, criterion_train,
                                          optimizer, epoch, iteration,
                                          opt.init_lr, opt.decay_power,
                                          opt.trade_off)
        val_loss = validate(val_loader, model, criterion_valid)
        # Save model
        if torch.abs(val_loss -
                     record_val_loss) < 0.0001 or val_loss < record_val_loss:
            save_checkpoint(opt.outf, epoch, iteration, model, optimizer)
            if val_loss < record_val_loss:
                record_val_loss = val_loss
        # print loss
        end_time = time.time()
        epoch_time = end_time - start_time
        print(
            "Epoch [%02d], Iter[%06d], Time:%.9f, learning rate : %.9f, Train Loss: %.9f Test Loss: %.9f "
            % (epoch, iteration, epoch_time, lr, train_loss, val_loss))
        # save loss
        record_loss(loss_csv, epoch, iteration, epoch_time, lr, train_loss,
                    val_loss)
        logger.info(
            "Epoch [%02d], Iter[%06d], Time:%.9f, learning rate : %.9f, Train Loss: %.9f Test Loss: %.9f "
            % (epoch, iteration, epoch_time, lr, train_loss, val_loss))
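
The train() call above receives opt.init_lr and opt.decay_power, which suggests a polynomial learning-rate decay over iterations. A minimal sketch follows; the function name and exact schedule are assumptions.

def poly_lr_scheduler(optimizer, init_lr, iteration, max_iter, power):
    lr = init_lr * (1 - float(iteration) / max_iter) ** power   # polynomial decay
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr
    return lr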
Example #17
File: ap.py Project: schemen/AtmoPi
import sys
import click
import logging
from queue import Queue
from threading import Thread

import utils
from exporters import Exporter
from collector import Collector

# Python 3 is required!
if sys.version_info[0] < 3:
    sys.stdout.write("Sorry, requires Python 3.x, not Python 2.x\n")
    sys.exit(1)

# Logging
utils.initialize_logger("log/")

# Load config right at the start
config = utils.load_config()


# Create the core CLI launcher
@click.group()
def cli():
    """Welcome to AtmoPi, your very own DIY Weather station"""
    pass


#######################################
### Verify command                  ###
#######################################
Example #18
def main():
    #https://drive.google.com/file/d/1QxQxf2dzfSbvCgWlI9VuxyBgfmQyCmfE/view?usp=sharing - train data
    #https://drive.google.com/file/d/11INkjd_ajT-RSCSFqfB7reLI6_m1jCAC/view?usp=sharing - val data
    #https://drive.google.com/file/d/1m0EZaRjla2o_eL3hOd7UMkSwoME5mF4A/view?usp=sharing - extra val data
    cudnn.benchmark = True
    # train_data = DatasetFromHdf5('C:/Users/alawy/Desktop/Training/Training-shadesofgrey/train_tbands.h5')
    train_data = DatasetFromHdf5('/storage/train_cropped14.h5')

    print(len(train_data))
    val_data_extra = DatasetFromHdf5('/storage/valid_extra99.h5')
    val_data = DatasetFromHdf5('/storage/valid_cropped89.h5')
    new_val = [val_data, val_data_extra]
    print(len(new_val))
    print('con')
    val_new = data.ConcatDataset(new_val)
    print(len(val_new))

    # Data Loader (Input Pipeline)
    train_data_loader = DataLoader(dataset=train_data,
                                   num_workers=4,
                                   batch_size=512,
                                   shuffle=True,
                                   pin_memory=True)
    val_loader = DataLoader(dataset=val_new,
                            num_workers=1,
                            batch_size=1,
                            shuffle=False,
                            pin_memory=True)
    # Dataset
    # torch.set_num_threads(12)

    # Model
    model = resblock(conv_relu_res_relu_block, 16, 3, 25)
    if torch.cuda.device_count() > 1:
        model = nn.DataParallel(model)
    if torch.cuda.is_available():
        model = model.to('cuda')
    # Parameters, Loss and Optimizer
    start_epoch = 0
    end_epoch = 1000
    init_lr = 0.0002
    iteration = 0
    record_test_loss = 1000
    criterion = rrmse_loss
    # optimizer = torch.optim.AdamW(model.parameters(), lr=init_lr, betas=(0.9, 0.999), eps=1e-08, weight_decay=0)
    optimizer = torch.optim.Adam(model.parameters(),
                                 lr=init_lr,
                                 betas=(0.9, 0.999),
                                 eps=1e-08,
                                 weight_decay=0.01)
    # model_path = '/storage/models-crop/'
    model_path = './models-crop/'
    if not os.path.exists(model_path):
        os.makedirs(model_path)
    loss_csv = open(os.path.join(model_path, 'loss.csv'), 'w+')

    log_dir = os.path.join(model_path, 'train.log')
    logger = initialize_logger(log_dir)

    # Resume
    resume_file = ''
    # resume_file = '/storage/notebooks/r9h1kyhq8oth90j/models/hscnn_5layer_dim10_69.pkl'
    # resume_file = '/storage/notebooks/r9h1kyhq8oth90j/models-crop/hscnn_5layer_dim10_95.pkl'
    if resume_file:
        if os.path.isfile(resume_file):
            print("=> loading checkpoint '{}'".format(resume_file))
            checkpoint = torch.load(resume_file)
            start_epoch = checkpoint['epoch']
            iteration = checkpoint['iter']
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])

    for epoch in range(start_epoch + 1, end_epoch):

        start_time = time.time()
        train_loss, iteration, lr = train(train_data_loader, model, criterion,
                                          optimizer, iteration, init_lr,
                                          end_epoch)
        test_loss = validate(val_loader, model, criterion)

        # Save model
        if test_loss < record_test_loss:
            record_test_loss = test_loss
        save_checkpoint(model_path, epoch, iteration, model, optimizer)

        # print loss
        end_time = time.time()
        epoch_time = end_time - start_time
        print(
            "Epoch [%d], Iter[%d], Time:%.9f, learning rate : %.9f, Train Loss: %.9f Test Loss: %.9f "
            % (epoch, iteration, epoch_time, lr, train_loss, test_loss))
        # save loss
        record_loss(loss_csv, epoch, iteration, epoch_time, lr, train_loss,
                    test_loss)
        logger.info(
            "Epoch [%d], Iter[%d], Time:%.9f, learning rate : %.9f, Train Loss: %.9f Test Loss: %.9f "
            % (epoch, iteration, epoch_time, lr, train_loss, test_loss))
        gc.collect()
Example #19
def main(**kwargs):
    # 1. Parse command line arguments.
    opt._parse(kwargs)

    # 2. Visdom
    # vis = Visualizer(env=opt.env)

    # 3. GPU settings
    # n_gpu = utils.set_gpu('0,1')

    # 4. Configure model
    logging.info('==> Training model for clothing type: {}'.format(opt.category))
    cudnn.benchmark = True
    net = getattr(models, opt.model)(opt)

    # 5. Initialize logger
    cur_time = time.strftime('%Y-%m-%dT%H:%M:%S', time.localtime())
    initialize_logger(f'{opt.category}_{opt.model}_{cur_time}')

    # 6. Initialize checkpoints directory
    lr = opt.lr
    start_epoch = 1
    best_val_loss = float('inf')

    if opt.load_checkpoint_path:
        logging.info('==> Resuming from checkpoint...')
        checkpoint = torch.load(opt.load_checkpoint_path)
        start_epoch = checkpoint['epoch'] + 1
        lr = checkpoint['lr']
        best_val_loss = checkpoint['best_val_loss']
        net.load_state_dict(checkpoint['state_dict'])

    # 7. Data setup
    train_dataset = FashionAIKeypoints(opt, phase='train')
    logging.info('Train sample number: {}'.format(len(train_dataset)))
    train_loader = DataLoader(train_dataset,
                              batch_size=opt.batch_size,
                              shuffle=True,
                              num_workers=opt.num_workers,
                              collate_fn=train_dataset.collate_fn,
                              pin_memory=True)

    val_dataset = FashionAIKeypoints(opt, phase='val')
    logging.info('Val sample number: {}'.format(len(val_dataset)))
    val_loader = DataLoader(val_dataset,
                            batch_size=opt.batch_size,
                            shuffle=False,
                            num_workers=opt.num_workers,
                            collate_fn=val_dataset.collate_fn,
                            pin_memory=True)

    net = net.cuda()
    # net = DataParallel(net)
    loss = CPNLoss()
    loss = loss.cuda()

    # 8. Loss, optimizer and LR scheduler
    optimizer = torch.optim.SGD(net.parameters(),
                                lr,
                                momentum=0.9,
                                weight_decay=1e-4)
    lrs = LRScheduler(lr,
                      patience=3,
                      factor=0.1,
                      min_lr=0.01 * lr,
                      best_loss=best_val_loss)

    # 9. Training loop
    for epoch in range(start_epoch, opt.max_epochs + 1):
        # Training
        logging.info("Start training loop...")
        train_metrics, train_time = train(train_loader, net, loss, optimizer,
                                          lr)

        # Validating
        logging.info("Start validating loop...")
        with torch.no_grad():
            val_metrics, val_time = validate(val_loader, net, loss)

        log_model(epoch, lr, train_metrics, train_time, val_metrics, val_time)

        val_loss = np.mean(val_metrics[:, 0])
        lr = lrs.update_by_rule(val_loss)

        # Save checkpoints
        if val_loss < best_val_loss or epoch % 10 == 0 or lr is None:
            if val_loss < best_val_loss:
                best_val_loss = val_loss

            model_ref = net.module if hasattr(net, 'module') else net
            state_dict = model_ref.state_dict()

            for key in state_dict.keys():
                state_dict[key] = state_dict[key].cpu()

            torch.save(
                {
                    'epoch': epoch,
                    'save_dir': opt.checkpoint_path,
                    'state_dict': state_dict,
                    'lr': lr,
                    'best_val_loss': best_val_loss
                }, opt.checkpoint_path /
                'kpt_{}_{:03d}.ckpt'.format(opt.category, epoch))

        if lr is None:
            logging.info('Training is early-stopped')
            break
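
update_by_rule drives both the LR decay and the early stop above (a None return ends training). Below is a minimal sketch of such a plateau rule; it is an assumption about what LRScheduler does, not the project's actual class.

class LRScheduler:
    def __init__(self, lr, patience=3, factor=0.1, min_lr=0.0, best_loss=float('inf')):
        self.lr, self.patience, self.factor = lr, patience, factor
        self.min_lr, self.best_loss, self.bad_epochs = min_lr, best_loss, 0

    def update_by_rule(self, val_loss):
        if val_loss < self.best_loss:
            self.best_loss, self.bad_epochs = val_loss, 0
        else:
            self.bad_epochs += 1
        if self.bad_epochs > self.patience:              # plateau: decay the LR
            self.lr *= self.factor
            self.bad_epochs = 0
            if self.lr < self.min_lr:
                return None                              # signal early stopping
        return self.lr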
Example #20
def main():

    cudnn.benchmark = True
    base_path = '/NSL/data/images/HyperspectralImages/ICVL/'
    # Dataset
    train_data = DatasetFromHdf5(base_path + '/train.h5')
    print(len(train_data))
    val_data = DatasetFromHdf5(base_path + '/valid.h5')
    print(len(val_data))

    # Data Loader (Input Pipeline)
    train_data_loader = DataLoader(dataset=train_data,
                                   num_workers=1,
                                   batch_size=64,
                                   shuffle=True,
                                   pin_memory=True)
    val_loader = DataLoader(dataset=val_data,
                            num_workers=1,
                            batch_size=1,
                            shuffle=False,
                            pin_memory=True)

    # Model
    model = resblock(conv_batch_relu_res_block, 16, 3, 31)
    if torch.cuda.device_count() > 1:
        model = nn.DataParallel(model)
    if torch.cuda.is_available():
        model.cuda()

    # Parameters, Loss and Optimizer
    start_epoch = 0
    end_epoch = 1000
    init_lr = 0.0002
    iteration = 0
    record_test_loss = 1000
    criterion = rrmse_loss
    optimizer = torch.optim.Adam(model.parameters(),
                                 lr=init_lr,
                                 betas=(0.9, 0.999),
                                 eps=1e-08,
                                 weight_decay=0)

    model_path = base_path + '/models/'
    if not os.path.exists(model_path):
        os.makedirs(model_path)
    loss_csv = open(os.path.join(model_path, 'loss.csv'), 'w+')

    log_dir = os.path.join(model_path, 'train.log')
    logger = initialize_logger(log_dir)

    # Resume
    resume_file = ''
    if resume_file:
        if os.path.isfile(resume_file):
            print("=> loading checkpoint '{}'".format(resume_file))
            checkpoint = torch.load(resume_file)
            start_epoch = checkpoint['epoch']
            iteration = checkpoint['iter']
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])

    for epoch in range(start_epoch + 1, end_epoch):

        print("epoch [%d]" % (epoch))
        start_time = time.time()
        train_loss, iteration, lr = train(train_data_loader, model, criterion,
                                          optimizer, iteration, init_lr,
                                          end_epoch)
        print("train done! epoch [%d]" % (epoch))
        test_loss = validate(val_loader, model, criterion)
        print("test done! epoch [%d]" % (epoch))

        # Save model
        if test_loss < record_test_loss:
            record_test_loss = test_loss
            save_checkpoint(model_path, epoch, iteration, model, optimizer)

        # print loss
        end_time = time.time()
        epoch_time = end_time - start_time
        print(
            "Epoch [%d], Iter[%d], Time:%.9f, learning rate : %.9f, Train Loss: %.9f Test Loss: %.9f "
            % (epoch, iteration, epoch_time, lr, train_loss, test_loss))
        # save loss
        record_loss(loss_csv, epoch, iteration, epoch_time, lr, train_loss,
                    test_loss)
        logger.info(
            "Epoch [%d], Iter[%d], Time:%.9f, learning rate : %.9f, Train Loss: %.9f Test Loss: %.9f "
            % (epoch, iteration, epoch_time, lr, train_loss, test_loss))
Example #21
"""Web / UI Server for Soundwave"""
import json
import os
from datetime import datetime

import cherrypy
import cherrypy.wsgiserver
import requests
from flask import Flask, request, render_template, g, send_from_directory

from config import SOUNDWAVE_API, SOUNDWAVE_HOST, SOUNDWAVE_PORT
from utils import get_soundwave_url, json_loads_byteified, \
    initialize_logger, get_table_data, query_soundwave, get_reservation_rows

# Initializations
logger = initialize_logger()

app = Flask(__name__, template_folder='templates', static_folder='static')


@app.before_request
def log_request():
    """Request logger."""
    if "static" not in request.path:
        logger.info("\npath: %s \nhttp-method: %s \nheaders: %s",
                    str(request.path), str(request.method),
                    str(request.headers))

        g.start = datetime.now()

Example #22
def main():

    cudnn.benchmark = True
    train_data = Dataset_cave_train('./data/train')
    print('number of train data: ', len(train_data))
    val_data = Dataset_cave_val('./data/test')
    print('number of validate data: ', len(val_data))

    # Model
    model = Net(HSI_num_residuals=args.HSI_num_residuals,
                RGB_num_residuals=args.RGB_num_residuals)

    # multi-GPU setup
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    model = nn.DataParallel(model)

    model = model.to(device=device, dtype=torch.float)  # float32
    model.apply(weights_init_kaiming)

    # Parameters, Loss and Optimizer
    start_epoch = 0
    end_epoch = 501
    init_lr = 0.0002
    iteration = 0

    criterion = MyLoss()
    optimizer = torch.optim.Adam(model.parameters(),
                                 lr=init_lr,
                                 betas=(0.9, 0.999),
                                 eps=1e-08,
                                 weight_decay=0)
    # scheduler = ReduceLROnPlateau(optimizer, mode='min', factor=0.5, patience=10, verbose=False,
    #     threshold=0.0001, threshold_mode='rel', cooldown=0, min_lr=0, eps=1e-08)

    model_path = args.model_path
    if not os.path.exists(model_path):
        os.makedirs(model_path)
    loss_csv = open(os.path.join(model_path, 'loss.csv'), 'w+')

    log_dir = os.path.join(model_path, 'train.log')
    logger = initialize_logger(log_dir)

    # Resume
    resume_file = ''
    if resume_file:
        if os.path.isfile(resume_file):
            print("=> loading checkpoint '{}'".format(resume_file))
            checkpoint = torch.load(resume_file)
            # start_epoch = checkpoint['epoch']
            # iteration = checkpoint['iter']
            model.load_state_dict(checkpoint['state_dict'])
            # optimizer.load_state_dict(checkpoint['optimizer'])

    for epoch in range(start_epoch + 1, end_epoch):

        train_data = Dataset_cave_train('./data/train')
        train_data_loader = DataLoader(
            dataset=train_data,
            num_workers=8,
            batch_size=16,
            shuffle=True,
            pin_memory=True,
        )

        val_data = Dataset_cave_val('./data/test')
        val_data_loader = DataLoader(dataset=val_data,
                                     num_workers=8,
                                     batch_size=16,
                                     shuffle=False,
                                     pin_memory=True)

        start_time = time.time()
        train_loss, iteration = train(train_data_loader, model, criterion,
                                      optimizer, iteration, device)

        val_loss = validate(val_data_loader, model, criterion, device)

        # Save model
        if epoch % 100 == 0:
            save_checkpoint(model_path, epoch, iteration, model, optimizer)

        # # Update learning rate
        for param_group in optimizer.param_groups:
            lr = param_group['lr']
        # scheduler.step(val_loss)

        # print loss
        end_time = time.time()
        epoch_time = end_time - start_time
        print(
            "Epoch [%d], Iter[%d], Time:%.9f, learning rate : %.9f, Train Loss: %.9f Test Loss: %.9f "
            % (epoch, iteration, epoch_time, lr, train_loss, val_loss))
        # save loss
        record_loss(loss_csv, epoch, iteration, epoch_time, lr, train_loss,
                    val_loss)  # record_loss writes these six values (epoch, iter, time, lr, losses) to the csv file
        logger.info(
            "Epoch [%d], Iter[%d], Time:%.9f, learning rate : %.9f, Train Loss: %.9f Test Loss: %.9f "
            % (epoch, iteration, epoch_time, lr, train_loss, val_loss))
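
Minimal sketches of the two helpers shared by the training loops above. The checkpoint keys ('epoch', 'iter', 'state_dict', 'optimizer') match the resume code in these examples; the file name pattern and the csv layout are assumptions.

import os
import torch

def save_checkpoint(model_path, epoch, iteration, model, optimizer):
    state = {'epoch': epoch,
             'iter': iteration,
             'state_dict': model.state_dict(),
             'optimizer': optimizer.state_dict()}
    torch.save(state, os.path.join(model_path, 'net_%depoch.pth' % epoch))

def record_loss(loss_csv, epoch, iteration, epoch_time, lr, train_loss, test_loss):
    loss_csv.write('%d,%d,%f,%f,%f,%f\n' %
                   (epoch, iteration, epoch_time, lr, train_loss, test_loss))
    loss_csv.flush()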