def do_client(idx, args):
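    # Batch WMT14 En-De test sentences, POST them to the serving endpoint,
    # and write (or profile) the returned translations.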
    dataset = load_dataset('wmt14ende', splits=('test',))

    headers = {"Content-type": "application/json"}
    url = "http://127.0.0.1:9292/transformer/prediction"

    batch = []
    sample = 0
    f = open(args.output_file, "w")
    if args.profile:
        recorder = Recorder(args.infer_batch_size, args.model_name)
        recorder.tic()

    for sequence in dataset:
        sample += 1
        batch.append(sequence[args.src_lang])
        if len(batch) < args.infer_batch_size and sample != len(dataset):
            continue
        data = {"feed": [{"src_word": batch}], "fetch": ["finished_sequence"]}
        r = requests.post(url=url, headers=headers, data=json.dumps(data))
        if r is not None:
            print("Status: ", r)

            if args.profile:
                recorder.toc(samples=len(batch))
            else:
                for seq in r.json()["result"]["finished_sequence"]:
                    f.write(seq[0] + "\n")
            batch = []
        if args.profile:
            recorder.tic()
    f.close()
    if args.profile:
        recorder.report()
        return [[recorder.infer_time]]
Example #2
    def get(self):
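        # Open this site's URL; for SWGS, accept the startup alert and wait out
        # the blocking overlay before attaching the Recorder.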
        self.driver.get(self.dic_url[self.sid])

        if self.sid == 'SWGS':
            try:
                # switch_to.alert raises NoAlertPresentException when no alert is open.
                self.driver.switch_to.alert
                alert = WebDriverWait(self.driver, 1).until(EC.alert_is_present())
                alert.accept()
            except NoAlertPresentException:
                pass
            finally:
                # Spin until the 'bodyBlock' overlay is no longer clickable.
                while True:
                    try:
                        WebDriverWait(self.driver, 0).until(EC.element_to_be_clickable((By.ID, 'bodyBlock')))
                    except TimeoutException:  # from selenium.common.exceptions
                        break

                try:
                    WebDriverWait(self.driver, 60).until(EC.presence_of_element_located((By.ID, 'mdi01_subWindow0_iframe')))
                finally:
                    print('Page is ready!')
                    self.driver.execute_script('''top.document.title = "(FOR AUTO TEST TOOL)"''')
                    # Declare the Recorder
                    self.recorder = Recorder(self.driver)
                    self.recorder.addEventListener()
Example #3
    def __init__(self, task, train_exp_dir, ite, logdir=None):
        self.task = task
        self.policy = LoadPolicy('../utils/models/{}/{}'.format(task, train_exp_dir), ite)
        self.env = CrossroadEnd2end(training_task=self.task, mode='testing')
        self.model = EnvironmentModel(self.task, mode='selecting')
        self.recorder = Recorder()
        self.episode_counter = -1
        self.step_counter = -1
        self.obs = None
        self.stg = MultiPathGenerator()
        self.step_timer = TimerStat()
        self.ss_timer = TimerStat()
        self.logdir = logdir
        if self.logdir is not None:
            config = dict(task=task, train_exp_dir=train_exp_dir, ite=ite)
            with open(self.logdir + '/config.json', 'w', encoding='utf-8') as f:
                json.dump(config, f, ensure_ascii=False, indent=4)
        self.fig = plt.figure(figsize=(8, 8))
        plt.ion()
        self.hist_posi = []
        self.old_index = 0
        self.path_list = self.stg.generate_path(self.task)
        # ------------------build graph for tf.function in advance-----------------------
        for i in range(3):
            obs = self.env.reset()
            obs = tf.convert_to_tensor(obs[np.newaxis, :], dtype=tf.float32)
            self.is_safe(obs, i)
        obs = self.env.reset()
        obs_with_specific_shape = np.tile(obs, (3, 1))
        self.policy.run_batch(obs_with_specific_shape)
        self.policy.obj_value_batch(obs_with_specific_shape)
        # ------------------build graph for tf.function in advance-----------------------
        self.reset()
Example #4
    def run(self, category, im_list, output_fn="tmp.txt"):
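        # Score each image's predicted and ground-truth segmentations with the
        # trained discriminator, caching per-image results via the Recorder.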
        # Load checkpoint
        checkpoint_dir = "{}/{}/{}".format(self.main_dir, category, self.lr)
        checkpoint, _ = get_latest_checkpoint(checkpoint_dir)
        disc = Discriminator(lr=self.lr, checkpoint=checkpoint)

        output_dir = join(self.main_dir, "predictions")
        output_path = join(output_dir, output_fn)
        recorder = Recorder(output_path, restart=False)

        for im in im_list:
            if recorder.contains(im):
                print(im, recorder.get(im), "Done.")
                continue

            img, _ = self.datasource.get_image(im)
            gt, _ = self.datasource.get_ground_truth(im, one_hot=True)
            ap, _ = self.datasource.get_all_prob(im)
            pr_prob = disc.predict(img, ap, category)
            gt_prob = disc.predict(img, gt, category)

            print("{} {} {}".format(im, pr_prob, gt_prob))
            recorder.save(im, [pr_prob, gt_prob])
        recorder.write()
        return output_path
Example #5
File: base.py  Project: familywei/RLs
    def generate_recorder(self, logger2file, graph):
        """
        create model/log/data directories and define a writer to record training data.
        """

        self.check_or_create(self.cp_dir, 'checkpoints')
        self.check_or_create(self.log_dir, 'logs(summaries)')
        self.check_or_create(self.excel_dir, 'excel')
        self.recorder = Recorder(log_dir=self.log_dir,
                                 excel_dir=self.excel_dir,
                                 logger2file=logger2file,
                                 graph=graph)
Example #6
def start():
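    # (Re)create and start the global PCRC recorder unless one is already running.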
    global recorder, logger, ConfigFile
    if recorder is None or recorder.is_stopped():
        logger.log('Creating new PCRC recorder')
        try:
            recorder = Recorder(ConfigFile, TranslationFolder)
        except YggdrasilError as e:
            logger.error(e)
            return
        ret = recorder.start()
        logger.log('Recorder started, success = {}'.format(ret))
    else:
        logger.warn('Recorder is running, ignore')
Example #7
    def __init__(self, task):
        self.task = task
        if self.task == 'left':
            self.policy = LoadPolicy('G:\\env_build\\utils\\models\\left', 100000)
        elif self.task == 'right':
            self.policy = LoadPolicy('G:\\env_build\\utils\\models\\right', 145000)
        elif self.task == 'straight':
            self.policy = LoadPolicy('G:\\env_build\\utils\\models\\straight', 95000)

        self.horizon = 25
        self.num_future_data = 0
        self.env = CrossroadEnd2end(training_task=self.task, num_future_data=self.num_future_data)
        self.model = EnvironmentModel(self.task)
        self.obs = self.env.reset()
        self.stg = StaticTrajectoryGenerator_origin(mode='static_traj')
        self.data2plot = []
        self.mpc_cal_timer = TimerStat()
        self.adp_cal_timer = TimerStat()
        self.recorder = Recorder()
Example #8
def setup_recorder(params):
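    # Attach a Recorder holding training-progress state (early-stopping counters,
    # step/epoch numbers, score history) to the hyper-parameters.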
    recorder = Recorder()
    # This is for early stopping; currently unused.
    recorder.bad_counter = 0  # start from 0
    recorder.estop = False

    recorder.lidx = -1  # local data index
    recorder.step = 0  # global step, start from 0
    recorder.epoch = 1  # epoch number, start from 1
    recorder.history_scores = []
    recorder.valid_script_scores = []

    # trying to load saved recorder
    record_path = os.path.join(params.output_dir, "record.json")
    record_path = os.path.abspath(record_path)
    if tf.gfile.Exists(record_path):
        recorder.load_from_json(record_path)

    params.add_hparam('recorder', recorder)
    return params
Example #9
    def __init__(self, task, logdir=None):
        self.task = task
        if self.task == 'left':
            self.policy = LoadPolicy('../utils/models/left', 100000)
        elif self.task == 'right':
            self.policy = LoadPolicy('../utils/models/right', 145000)
        elif self.task == 'straight':
            self.policy = LoadPolicy('../utils/models/straight', 95000)
        self.env = CrossroadEnd2end(training_task=self.task)
        self.model = EnvironmentModel(self.task, mode='selecting')
        self.recorder = Recorder()
        self.episode_counter = -1
        self.step_counter = -1
        self.obs = None
        self.stg = None
        self.step_timer = TimerStat()
        self.ss_timer = TimerStat()
        self.logdir = logdir
        self.fig = plt.figure(figsize=(8, 8))
        plt.ion()
        self.hist_posi = []
        self.reset()
Example #10
    def create_predictor(cls,
                         args,
                         config=None,
                         profile=False,
                         model_name=None):
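        # Build a Paddle Inference config for the exported transformer unless
        # the caller supplies one.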
        if config is None:
            config = inference.Config(
                os.path.join(args.inference_model_dir, "transformer.pdmodel"),
                os.path.join(args.inference_model_dir,
                             "transformer.pdiparams"))
            if args.device == "gpu":
                config.enable_use_gpu(100, 0)
            elif args.device == "xpu":
                config.enable_xpu(100)
            else:
                # CPU
                config.disable_gpu()
                if args.use_mkl:
                    config.enable_mkldnn()
                    config.set_cpu_math_library_num_threads(args.threads)
            # Use ZeroCopy.
            config.switch_use_feed_fetch_ops(False)

        if profile:
            recorder = Recorder(config, args.infer_batch_size, model_name)
        else:
            recorder = None

        predictor = inference.create_predictor(config)
        input_handles = [
            predictor.get_input_handle(name)
            for name in predictor.get_input_names()
        ]
        output_handles = [
            predictor.get_output_handle(name)
            for name in predictor.get_output_names()
        ]
        return cls(predictor, input_handles, output_handles, recorder)
Example #11
    def __init__(self, task, train_exp_dir, ite, logdir=None):
        self.task = task
        self.policy = LoadPolicy(
            '../utils/models/{}/{}'.format(task, train_exp_dir), ite)
        self.env = CrossroadEnd2end(training_task=self.task)
        self.model = EnvironmentModel(self.task, mode='selecting')
        self.recorder = Recorder()
        self.episode_counter = -1
        self.step_counter = -1
        self.obs = None
        self.stg = None
        self.step_timer = TimerStat()
        self.ss_timer = TimerStat()
        self.logdir = logdir
        if self.logdir is not None:
            config = dict(task=task, train_exp_dir=train_exp_dir, ite=ite)
            with open(self.logdir + '/config.json', 'w',
                      encoding='utf-8') as f:
                json.dump(config, f, ensure_ascii=False, indent=4)
        self.fig = plt.figure(figsize=(8, 8))
        plt.ion()
        self.hist_posi = []
        self.reset()
Example #12
def plot_and_save_ith_episode_data(logdir, i):
    recorder = Recorder()
    recorder.load(logdir)
    save_dir = logdir + '/episode{}/figs'.format(i)
    os.makedirs(save_dir, exist_ok=True)
    recorder.plot_and_save_ith_episode_curves(i, save_dir, True)
Example #13
def main():
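    # Entry point: load the config, create directories and loggers, build the
    # Unity env and the chosen algorithm, then train or run inference.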
    if sys.platform.startswith('win'):
        # Add the _win_handler function to the windows console's handler function list
        win32api.SetConsoleCtrlHandler(_win_handler, True)
    if os.path.exists(
            os.path.join(config_file.config['config_file'], 'config.yaml')):
        config = sth.load_config(config_file.config['config_file'])
    else:
        config = config_file.config
        print('config.yaml not found, using the default config.')

    hyper_config = config['hyper parameters']
    train_config = config['train config']
    record_config = config['record config']

    basic_dir = record_config['basic_dir']
    last_name = record_config['project_name'] + '/' \
        + record_config['remark'] \
        + record_config['run_id']
    cp_dir = record_config['checkpoint_basic_dir'] + last_name
    cp_file = cp_dir + '/rb'
    log_dir = record_config['log_basic_dir'] + last_name
    excel_dir = record_config['excel_basic_dir'] + last_name
    config_dir = record_config['config_basic_dir'] + last_name
    sth.check_or_create(basic_dir, 'basic')
    sth.check_or_create(cp_dir, 'checkpoints')
    sth.check_or_create(log_dir, 'logs(summaries)')
    sth.check_or_create(excel_dir, 'excel')
    sth.check_or_create(config_dir, 'config')

    logger = create_logger(
        name='logger',
        console_level=logging.INFO,
        console_format='%(levelname)s : %(message)s',
        logger2file=record_config['logger2file'],
        file_name=log_dir + '/log.txt',
        file_level=logging.WARNING,
        file_format=
        '%(lineno)d - %(asctime)s - %(module)s - %(funcName)s - %(levelname)s - %(message)s'
    )
    if train_config['train']:
        sth.save_config(config_dir, config)

    if train_config['unity_mode']:
        env = UnityEnvironment()
    else:
        env = UnityEnvironment(
            file_name=train_config['unity_file'],
            no_graphics=True if train_config['train'] else False,
            base_port=train_config['port'])
    brain_name = env.external_brain_names[0]
    brain = env.brains[brain_name]
    # set the memory use proportion of GPU
    tf_config = tf.ConfigProto()
    tf_config.gpu_options.allow_growth = True
    # tf_config.gpu_options.per_process_gpu_memory_fraction = 0.5
    tf.reset_default_graph()
    graph = tf.Graph()
    with graph.as_default() as g:
        with tf.Session(graph=g, config=tf_config) as sess:
            logger.info('Algorithm: {0}'.format(
                train_config['algorithm'].name))
            if train_config['algorithm'] == config_file.algorithms.ppo_sep_ac:
                from ppo.ppo_base import PPO_SEP
                model = PPO_SEP(sess=sess,
                                s_dim=brain.vector_observation_space_size,
                                a_counts=brain.vector_action_space_size[0],
                                hyper_config=hyper_config)
                logger.info('PPO_SEP initialize success.')
            elif train_config['algorithm'] == config_file.algorithms.ppo_com:
                from ppo.ppo_base import PPO_COM
                model = PPO_COM(sess=sess,
                                s_dim=brain.vector_observation_space_size,
                                a_counts=brain.vector_action_space_size[0],
                                hyper_config=hyper_config)
                logger.info('PPO_COM initialize success.')
            elif train_config['algorithm'] == config_file.algorithms.sac:
                from sac.sac import SAC
                model = SAC(sess=sess,
                            s_dim=brain.vector_observation_space_size,
                            a_counts=brain.vector_action_space_size[0],
                            hyper_config=hyper_config)
                logger.info('SAC initialize success.')
            elif train_config['algorithm'] == config_file.algorithms.sac_no_v:
                from sac.sac_no_v import SAC_NO_V
                model = SAC_NO_V(sess=sess,
                                 s_dim=brain.vector_observation_space_size,
                                 a_counts=brain.vector_action_space_size[0],
                                 hyper_config=hyper_config)
                logger.info('SAC_NO_V initialize success.')
            elif train_config['algorithm'] == config_file.algorithms.ddpg:
                from ddpg.ddpg import DDPG
                model = DDPG(sess=sess,
                             s_dim=brain.vector_observation_space_size,
                             a_counts=brain.vector_action_space_size[0],
                             hyper_config=hyper_config)
                logger.info('DDPG initialize success.')
            elif train_config['algorithm'] == config_file.algorithms.td3:
                from td3.td3 import TD3
                model = TD3(sess=sess,
                            s_dim=brain.vector_observation_space_size,
                            a_counts=brain.vector_action_space_size[0],
                            hyper_config=hyper_config)
                logger.info('TD3 initialize success.')
            recorder = Recorder(log_dir,
                                excel_dir,
                                record_config,
                                logger,
                                max_to_keep=5,
                                pad_step_number=True,
                                graph=g)
            episode = init_or_restore(cp_dir, sess, recorder, cp_file)
            try:
                if train_config['train']:
                    if train_config['use_replay_buffer']:
                        train_OffPolicy(sess=sess,
                                        env=env,
                                        brain_name=brain_name,
                                        begin_episode=episode,
                                        model=model,
                                        recorder=recorder,
                                        cp_file=cp_file,
                                        hyper_config=hyper_config,
                                        train_config=train_config)
                    else:
                        train_OnPolicy(sess=sess,
                                       env=env,
                                       brain_name=brain_name,
                                       begin_episode=episode,
                                       model=model,
                                       recorder=recorder,
                                       cp_file=cp_file,
                                       hyper_config=hyper_config,
                                       train_config=train_config)
                    tf.train.write_graph(g,
                                         cp_dir,
                                         'raw_graph_def.pb',
                                         as_text=False)
                    export_model(cp_dir, g)
                else:
                    inference(env, brain_name, model, train_config)
            except Exception as e:
                logger.error(e)
            finally:
                env.close()
    recorder.close()
    sys.exit()
Example #14
target_summary_path = '%s/runs-%s/CR%.2f' %(save_root, target_dataset_name, 100 * overall_CR)

for SummaryPath in [source_summary_path, target_summary_path]:

    if args.exp_spec != '':
        SummaryPath += ('-' + args.exp_spec)

    if os.path.exists(SummaryPath):
        print('Record exists, press Enter to remove')
        input()
        shutil.rmtree(SummaryPath)
    os.makedirs(SummaryPath)

source_recorder = Recorder(SummaryPath=source_summary_path, dataset_name=source_dataset_name)
target_recorder = Recorder(SummaryPath=target_summary_path, dataset_name=target_dataset_name)
alpha_change_point_file = open('%s/alpha_change_point.txt' %(target_summary_path), 'w+')

##################
# Begin Training #
##################
best_acc_list = [] # Best test acc in each training period under various alpha
niter = 0
for ite, CR_ratio in enumerate(CR_list):

    alpha = alpha_list[ite]

    print('Adaptive iteration: %d, alpha: %.3f' % (ite, alpha))
    print('Current CR: %s' % CR_ratio)
    print('niter: %d' %niter)
Example #15
if use_cuda:
    net.cuda()
    # net = torch.nn.DataParallel(net, device_ids=range(torch.cuda.device_count()))
    cudnn.benchmark = True

# ---
# Begin Training
# ---
ascent_count = 0
min_train_loss = 1e9
criterion = nn.CrossEntropyLoss()
max_training_epoch = args.max_epoch

# Initialize recorder for general training
recorder = Recorder(SummaryPath=save_root)
recorder.write_arguments([args])
# Initialize recorder for threshold
weight_quantization_error_recorder_collection = {}
input_quantization_error_recorder_collection = {}
weight_bit_allocation_collection = {}
input_bit_allocation_collection = {}
for name, layer in net.quantized_layer_collections.items():
    if not os.path.exists('%s/%s' % (save_root, name)):
        os.makedirs('%s/%s' % (save_root, name))
    weight_quantization_error_recorder_collection[name] = open('%s/%s/weight_quantization_error.txt' % (save_root, name), 'a+')
    input_quantization_error_recorder_collection[name] = open('%s/%s/input_quantization_error.txt' % (save_root, name), 'a+')
    weight_bit_allocation_collection[name] = open('%s/%s/weight_bit_allocation.txt' % (save_root, name), 'a+')
    input_bit_allocation_collection[name] = open('%s/%s/input_bit_allocation.txt' % (save_root, name), 'a+')

for epoch in range(start_epoch, start_epoch + max_training_epoch):
Example #16
import os, sys
import wave, audioop, time
from flask import g, jsonify, request
from flask_restful import Resource
from aip import AipSpeech
from utils.recorder import Recorder
from utils import gmm_vad
import threading

UPLOAD_PATH = os.path.join(basedir, 'static/uploads/')
VOICES_PATH = os.path.join(UPLOAD_PATH, 'voices/')
TEMP_PATH = os.path.join(UPLOAD_PATH, 'temps/')
ALGORITHM_PATH = os.path.join(UPLOAD_PATH, 'algorithms')
MODELS_PATH = os.path.join(UPLOAD_PATH, 'models')
sys.path.append(ALGORITHM_PATH)
sys.path.append(MODELS_PATH)

started_list = []
rec = Recorder()


class TokenResource(Resource):
    # decorators = [auth.login_required]
    def get(self):
        token = g.user.generate_auth_token()
        return jsonify({'token': token.decode('ascii')})


class LoginResource(Resource):
    def post(self):
        username = request.json['username']
        password = request.json['password']
        result = {}
        if self.verify_auth(username, password):
Example #17
from utils.recorder import Recorder
from sorting.merge_sort import merge_sort
from sorting.merge_sort_iterative import merge_sort_iter
from sorting.quick_sort import quick_sort
from sorting.quick_sort_3way import quick_sort_3way
from sorting.selection_sort import selection_sort
from sorting.insertion_sort import insert_sort_optimized
from sorting.insertion_sort import insert_sort
import numpy as np

from time import time
from threading import Thread

if __name__ == '__main__':
    s = time()
    recorder = Recorder()
    start, end = 2, 6
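    # Time each sorting algorithm on random arrays of 10**2 through 10**5 elements.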

    for x in range(start, end):
        data = list(np.random.rand(np.power(10, x)))

        # Give each thread its own copy so the in-place sorts don't race on
        # shared data (assuming recorder.execute does not copy its input).
        p1 = Thread(target=recorder.execute, args=(selection_sort, list(data)))
        p2 = Thread(target=recorder.execute, args=(insert_sort, list(data)))

        p3 = Thread(target=recorder.execute,
                    args=(insert_sort_optimized, list(data)))
        p4 = Thread(target=recorder.execute,
                    args=(merge_sort, list(data), [0, len(data) - 1]))
        p5 = Thread(target=recorder.execute, args=(merge_sort_iter, list(data)))
        p6 = Thread(target=recorder.execute,
                    args=(quick_sort, list(data), [0, len(data) - 1]))
Example #18
####################
# Initial Recorder #
####################
if args.exp_spec != '':
    SummaryPath += ('-' + args.exp_spec)

print('Save to %s' % SummaryPath)

if os.path.exists(SummaryPath):
    print('Record exists, press Enter to remove')
    input()
    shutil.rmtree(SummaryPath)
os.makedirs(SummaryPath)

recorder = Recorder(SummaryPath=SummaryPath, dataset_name=dataset_name)

##################
# Begin Training #
##################
meta_grad_dict = dict()
for epoch in range(MAX_EPOCH):

    if recorder.stop: break

    print('\nEpoch: %d, lr: %e' % (epoch, optimizee.param_groups[0]['lr']))

    net.train()
    end = time.time()

    recorder.reset_performance()
Example #19
def plot_data(logdir, i):
    recorder = Recorder()
    recorder.load(logdir)
    recorder.plot_ith_episode_curves(i)
Example #20
    def __init__(self, task_name, task_type='prune', optimizer_type='adam',
                 save_root=None, SummaryPath=None, use_cuda=True, **kwargs):
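        # Set up one compression task: build the network, recorder, pre-trained
        # weights, data loaders and optimizer for task_name.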

        self.task_name = task_name
        self.task_type = task_type # prune, soft-quantize
        self.model_name, self.dataset_name = task_name.split('-')
        self.ratio = 'sample' if self.dataset_name in ['CIFARS'] else -1

        #######
        # Net #
        #######
        if task_type == 'prune':
            if self.model_name == 'ResNet20':
                if self.dataset_name in ['CIFAR10', 'CIFARS']:
                    self.net = resnet20_cifar()
                elif self.dataset_name == 'STL10':
                    self.net = resnet20_stl()
                else:
                    raise NotImplementedError
            elif self.model_name == 'ResNet32':
                if self.dataset_name in ['CIFAR10', 'CIFARS']:
                    self.net = resnet32_cifar()
                elif self.dataset_name == 'STL10':
                    self.net = resnet32_stl()
                else:
                    raise NotImplementedError
            elif self.model_name == 'ResNet56':
                if self.dataset_name in ['CIFAR10', 'CIFARS']:
                    self.net = resnet56_cifar()
                elif self.dataset_name == 'CIFAR100':
                    self.net = resnet56_cifar(num_classes=100)
                elif self.dataset_name == 'STL10':
                    self.net = resnet56_stl()
                else:
                    raise NotImplementedError
            elif self.model_name == 'ResNet18':
                if self.dataset_name == 'ImageNet':
                    self.net = resnet18()
                else:
                    raise NotImplementedError
            elif self.model_name == 'vgg11':
                self.net = vgg11() if self.dataset_name == 'CIFAR10' else vgg11_stl10()
            else:
                print(self.model_name, self.dataset_name)
                raise NotImplementedError
        elif task_type == 'soft-quantize':
            if self.model_name == 'ResNet20':
                if self.dataset_name in ['CIFAR10', 'CIFARS']:
                    self.net = soft_quantized_resnet20_cifar()
                elif self.dataset_name in ['STL10']:
                    self.net = soft_quantized_resnet20_stl()
            else:
                raise NotImplementedError
        else:
            raise ValueError('Task type not defined.')


        self.meta_opt_flag = True  # True enables meta learning

        ##############
        # Meta Prune #
        ##############
        self.mask_dict = dict()
        self.meta_grad_dict = dict()
        self.meta_hidden_state_dict = dict()

        ######################
        # Meta Soft Quantize #
        ######################
        self.quantized = 0 # Quantized type
        self.alpha_dict = dict()
        self.alpha_hidden_dict = dict()
        self.sq_rate = 0
        self.s_rate = 0
        self.q_rate = 0

        ##########
        # Record #
        ##########
        self.dataset_type = 'large' if self.dataset_name in ['ImageNet'] else 'small'
        self.SummaryPath = SummaryPath
        self.save_root = save_root

        self.recorder = Recorder(self.SummaryPath, self.dataset_name, self.task_name)

        ####################
        # Load Pre-trained #
        ####################
        self.pretrain_path = '%s/%s-pretrain.pth' %(self.save_root, self.task_name)
        self.net.load_state_dict(torch.load(self.pretrain_path))
        print('Load pre-trained model from %s' %self.pretrain_path)

        if use_cuda:
            self.net.cuda()

        # Optimizer for this task
        if optimizer_type in ['Adam', 'adam']:
            self.optimizer = Adam(self.net.parameters(), lr=1e-3)
        else:
            self.optimizer = SGD(self.net.parameters())

        if self.dataset_name == 'ImageNet':
            try:
                self.train_loader = get_lmdb_imagenet('train', 128)
                self.test_loader = get_lmdb_imagenet('test', 100)
            except Exception:
                # Fall back to the regular loader if the LMDB dataset is unavailable.
                self.train_loader = get_dataloader(self.dataset_name, 'train', 128)
                self.test_loader = get_dataloader(self.dataset_name, 'test', 100)
        else:
            self.train_loader = get_dataloader(self.dataset_name, 'train', 128, ratio=self.ratio)
            self.test_loader = get_dataloader(self.dataset_name, 'test', 128)

        self.iter_train_loader = yielder(self.train_loader)
Example #21
def plot_data(epi_num, logdir):
    recorder = Recorder()
    recorder.load(logdir)
    recorder.plot_mpc_rl(epi_num)
Example #22
                  eps=adam_epsilon)
scheduler = get_linear_schedule_with_warmup(
    optimizer,
    num_warmup_steps=warmup_steps,
    num_training_steps=len(train_dataloader) * n_epoch)

# -------
# Initialize Recorder
# -------
SummaryPath = './Results/ALBERT-GLUE-%s/%s/%s/runs-%s-%d' % (
    task_name.upper(), 'Prune' if args.prune else 'Quant', args.model_type,
    'CR' if args.prune else 'bitW',
    int(100.0 * args.CR) if args.prune else args.bitW)
if args.exp_spec is not None:
    SummaryPath += ('-' + args.exp_spec)
recorder = Recorder(SummaryPath)
recorder.write_arguments([args])

if args.first_eval:
    result = evaluate(task_name, model, eval_dataloader,
                      model_type)  # ['acc', 'f1', 'acc_f1']
    print(result)

# --------------
# Begin Training
# --------------
for epoch_idx in range(n_epoch):

    print('Epoch: %d' % epoch_idx)
    model.train()