Example #1
from argparse import ArgumentParser

from utils.testing import run_tests, compile_programs
from utils.tests import CombingTest
# Assumed location of the Logger helper used below; the original snippet
# never shows this import.
from utils.logger import Logger

# CXX_COMPILER_PATH = '/usr/bin/g++-10'
CXX_COMPILER_PATH = '/usr/bin/g++'

SINGLE_THREADED_SOLUTIONS = []

MULTI_THREADED_SOLUTIONS = [
    # 'semi_local_hybrid_iterative'
    'semi_local_parallel_iterative'
]

SOLUTIONS_FOLDER = 'combing_solutions'  # where we put our ready-to-run implementations

default_logger = Logger('fig9_log.txt')
build_logger = Logger('fig9_build_log.txt')


def boolean_string(s):
    if s not in {'False', 'True'}:
        raise ValueError('Not a valid boolean string')
    return s == 'True'


if __name__ == '__main__':
    arg_parser = ArgumentParser()
    arg_parser.add_argument('max_thds', type=int, help='maximum number of threads')
    arg_parser.add_argument(
        'tests',
        type=str,
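The listing is cut off midway through the second add_argument call. As a hedged aside, boolean_string above is written to be used as an argparse type, so that explicit 'True'/'False' strings on the command line parse to real booleans; a minimal usage sketch (the '--recompile' flag is hypothetical, not from the original script):

# Usage sketch for boolean_string -- the flag name is illustrative only.
demo_parser = ArgumentParser()
demo_parser.add_argument('--recompile', type=boolean_string, default=False)
demo_args = demo_parser.parse_args(['--recompile', 'True'])
assert demo_args.recompile is True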
Example #2
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# @author: sharly


import configparser
import yaml
import openpyxl
import csv
import time
from utils.logger import Logger

logger = Logger(logger="BaseUtil").get_log()


class BaseUtil:
    def __init__(self):
        self.root_path = 'E:/PycharmProjects/wechat_test'

    def set_root_path(self, path):
        """设置项目的主path"""
        self.root_path = path

    def get_root_path(self):
        """获取项目的主path"""
        return self.root_path

    def get_config_value(self, section, key):
        """获取config文件中section的key值"""
        config_path = self.root_path + "/conf/config.ini"
        config = configparser.ConfigParser()
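The listing breaks off right after the parser is constructed. A minimal, hedged completion of get_config_value, using the standard configparser read/get idiom (the encoding argument is an assumption; config.ini files are commonly UTF-8):

        # Hedged completion -- everything below this comment is not in the
        # original, which is truncated at the ConfigParser() call.
        config.read(config_path, encoding='utf-8')
        return config.get(section, key)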
Example #3
import os

# The optimizer and scheduler calls below match PyTorch's torch.optim API,
# so these two imports are a safe bet; the project-local names the snippet
# also relies on (losses, get_model, Trainer, Logger, device, the data
# loaders, and the transform factories) are not shown in the original and
# are assumed to be imported elsewhere.
import torch.optim as optim
from torch.optim import lr_scheduler


def main(args):

    # args.n_epochs = 10
    # args.crop_scale = 0.3
    # args.batch_size = 128
    args.normal_data_ratio = 0.9

    if args.dataset == 'MNIST':
        args.sample_size = 28
    elif args.dataset in ('SOP', 'Shopee'):
        # The original tested `args.dataset == 'SOP' or 'Shopee'`, which is
        # always truthy; `in` expresses the intended membership test.
        if args.model == 'inception':
            args.sample_size = 299
        else:
            # resnet, vgg, vgg_attn, and any other model default to 224
            args.sample_size = 224

    spatial_transform_train = get_train_transform(args)
    crop_transform = get_crop_transform(args)

    if args.dataset == 'MNIST':
        train_data_loader = CroppedMNISTLoader(args, crop_transform=crop_transform,
                                               spatial_transform=spatial_transform_train, training=True)

    elif args.dataset == 'SOP':
        train_data_loader = CroppedSOPLoader(args, crop_transform=crop_transform,
                                             spatial_transform=spatial_transform_train, training=True)

    elif args.dataset == 'Shopee':
        train_data_loader = ShopeeDataLoader(args, crop_transform=crop_transform,
                                             spatial_transform=spatial_transform_train, training=True)

    valid_data_loader = train_data_loader.split_validation()

    args.n_channels = train_data_loader.n_channels
    args.n_classes = train_data_loader.n_classes

    model, parameters = get_model(args)
    model = model.to(device)

    criterion = losses.cross_entropy_loss()

    train_logger = Logger(
        os.path.join(args.log_path, 'train.log'),
        ['epoch', 'loss', 'acc', 'lr'])
    train_batch_logger = Logger(
        os.path.join(args.log_path, 'train_batch.log'),
        ['epoch', 'batch', 'iter', 'loss', 'acc', 'lr'])
    valid_logger = Logger(
        os.path.join(args.log_path, 'val.log'),
        ['epoch', 'loss', 'acc'])

    revision_logger = Logger(
        os.path.join(args.log_path, 'revision_info.log'),
        ['dataset', 'dataset_size', 'train_test_split', 'model', 'model_depth', 'resume', 'resume_path', 'batch_size',
         'n_epochs', 'sample_size', 'crop_scale', 'crop_transform', 'cropped_data_ratio'])
    revision_logger.log({
        'dataset': args.dataset,
        'dataset_size': args.dataset_size,
        'train_test_split': args.train_test_split,
        'model': args.model,
        'model_depth': args.model_depth,
        'resume': args.resume,
        'resume_path': args.resume_path,
        'batch_size': args.batch_size,
        'n_epochs': args.n_epochs,
        'sample_size': args.sample_size,
        'crop_scale': args.crop_scale,
        'crop_transform': crop_transform.__class__.__name__,
        'cropped_data_ratio': args.cropped_data_ratio
    })

    # PyTorch's SGD rejects nonzero dampening when Nesterov momentum is
    # enabled, so force it to zero in that case.
    if args.nesterov:
        dampening = 0
    else:
        dampening = args.dampening

    optimizer = optim.SGD(
        parameters,
        lr=args.learning_rate,
        momentum=args.momentum,
        dampening=dampening,
        weight_decay=args.weight_decay,
        nesterov=args.nesterov)

    scheduler = lr_scheduler.ReduceLROnPlateau(
        optimizer, 'min', patience=args.lr_patience)

    trainer = Trainer(model, criterion, optimizer, args, device, train_data_loader, lr_scheduler=scheduler,
                      valid_data_loader=valid_data_loader, train_logger=train_logger, batch_logger=train_batch_logger, valid_logger=valid_logger)

    trainer.train()
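
The snippet defines main(args) but never shows its entry point. A minimal, hedged sketch of how it might be driven (every flag name below is inferred from the attributes main() reads; the defaults are illustrative, and the real script needs the remaining attributes too, e.g. n_epochs, batch_size, crop_scale, dataset_size, model_depth, resume):

if __name__ == '__main__':
    import argparse

    # Hypothetical CLI -- argument names mirror the attributes accessed in
    # main() above; none of these defaults come from the original.
    parser = argparse.ArgumentParser()
    parser.add_argument('--dataset', default='MNIST',
                        choices=['MNIST', 'SOP', 'Shopee'])
    parser.add_argument('--model', default='resnet')
    parser.add_argument('--learning_rate', type=float, default=0.1)
    parser.add_argument('--momentum', type=float, default=0.9)
    parser.add_argument('--dampening', type=float, default=0.0)
    parser.add_argument('--weight_decay', type=float, default=1e-4)
    parser.add_argument('--nesterov', action='store_true')
    parser.add_argument('--lr_patience', type=int, default=10)
    parser.add_argument('--log_path', default='./logs')
    main(parser.parse_args())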