Example 1
from bitcoinutils.keys import P2pkhAddress, PrivateKey, PublicKey
import init
import consts

init.init_network()


class Id:
    """
    Helper class for easily handling identity-related keys and addresses
    """
    def __init__(self, sk: str):
        # private key from a hex-encoded secret exponent
        self.sk = PrivateKey(secret_exponent=int(sk, 16))
        # derive the corresponding public key and legacy (P2PKH) address
        self.pk = self.sk.get_public_key()
        self.addr = self.pk.get_address().to_string()
        # scriptPubKey that locks outputs sent to this address
        self.p2pkh = P2pkhAddress(self.addr).to_script_pub_key()
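
A minimal usage sketch, not part of the original example: the hex secret exponent below is a made-up placeholder, and printing the locking script assumes bitcoinutils' Script.to_hex().

if __name__ == '__main__':
    # hypothetical secret exponent (hex); never reuse a key that has been published
    alice = Id('8d3f1a0b76fe34bd28bb54b4d1b15a27e10a7a05d1809e7f0e8e1e1f7e5b9a8c')
    print(alice.addr)            # P2PKH address on whatever network init_network() selected
    print(alice.p2pkh.to_hex())  # the corresponding locking script, hex-encoded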
Example 2
    def __init__(self, args):
        self.args = args
        self.model_dir = args.model_dir
        self.param_dir = os.path.join(self.model_dir, 'params')
        self.tag = datetime.datetime.now().strftime(
            'training_%Y_%m_%d_%H_%M_%S')
        self.log_dir = os.path.join(self.model_dir, 'logs')
        self.log_file = os.path.join(self.log_dir, self.tag + '.log')
        self.anno_dir = os.path.join(self.model_dir, 'annotations')
        self.book_file = os.path.join(self.anno_dir, self.tag + '.csv')
        for d in (self.model_dir, self.log_dir, self.param_dir, self.anno_dir):
            if not os.path.exists(d): os.makedirs(d)
        self.logger = init_logger(self.log_file, args.log_level)

        self.context = ([mx.gpu(i) for i in args.gpus]
                        if len(args.gpus) > 0 else [mx.cpu()])

        self.dataset = args.name
        if self.dataset == 'cifar10':
            from iters import cifar10_iterator
            from questions import cifar10_questions, cifar10_questions_wordnet
            self.train_iter, self.val_iter, self.K, self.N = cifar10_iterator(
                args, self.logger)
            if args.question_set == 'wordnet':
                self.Q = cifar10_questions_wordnet()
            else:
                self.Q = cifar10_questions()
        elif self.dataset == 'cifar100':
            from iters import cifar100_iterator
            self.train_iter, self.val_iter, self.K, self.N = cifar100_iterator(
                args, self.logger)
            from questions import cifar100_questions, cifar100_questions_wordnet
            if args.question_set == 'wordnet':
                self.Q = cifar100_questions_wordnet()
            else:
                self.Q = cifar100_questions()
        elif self.dataset == 'tinyimagenet200':
            from iters import tinyimagenet200_iterator
            from questions import tinyimagenet200_questions_wordnet
            self.train_iter, self.val_iter, self.K, self.N = tinyimagenet200_iterator(
                args, self.logger)
            if args.question_set == 'wordnet':
                self.Q = tinyimagenet200_questions_wordnet()
            else:
                raise ValueError('TinyImageNet must use the WordNet hierarchy')
        else:
            raise NotImplementedError('%s is not supported' % self.dataset)

        self.train_iter.add('query', self.train_iter.seq, self.train_iter.cur)
        self.train_iter.add('optimization', [], 0)

        self.net = init_network(self.K, self.context, self.args)
        self.cnet = init_network(self.K, self.context, self.args)
        # run one batch to make sure it initializes
        self.net(
            nd.ones(self.train_iter.data_shape,
                    self.context[0]).expand_dims(0))
        self.cnet(
            nd.ones(self.train_iter.data_shape,
                    self.context[0]).expand_dims(0))

        # NOTE: if gpu 0 runs out of memory in the multi-GPU case, initialize the
        # Trainer with kvstore='local'; otherwise all resources are placed on gpu 0
        # https://github.com/apache/incubator-mxnet/blob/master/python/mxnet/gluon/trainer.py#L54
        if args.optimizer == 'sgd':
            optargs = {
                'learning_rate': args.learning_rate,
                'momentum': args.momentum,
                'wd': args.weight_decay
            }
        elif args.optimizer == 'adam':
            optargs = {
                'learning_rate': args.learning_rate,
                'beta1': args.beta1,
                'beta2': args.beta2
            }
        else:
            raise NotImplementedError('%s is not supported' % args.optimizer)

        # NOTE: added on 05/15 to prevent gradient explosion.
        # Hopefully this addresses the instability problem.
        optargs['clip_gradient'] = 2.0

        self.trainer = mx.gluon.Trainer(self.net.collect_params(),
                                        args.optimizer, optargs)
        self.train_annotation = np.ones((self.N, self.K), dtype=bool)
        self.train_annotation_sum = np.full(self.N, self.K, dtype=np.float64)
        self.train_prior = np.full((self.N, self.K), 1.0 / self.K)
        self.train_label = np.zeros(self.N, dtype=np.int64)
        self.train_iter.switch('query')
        self.train_iter.reset()
        for batch in self.train_iter:
            index, label = batch.index[0], batch.label[0]
            self.train_label[index] = label.asnumpy()

        # iterator's basic settings
        self.batch_size = args.batch_size

        # NOTE: removed many awkward variants, keeping only the core functionality
        self.feedback_type = args.feedback_type
        self.active_instance = args.active_instance
        self.least_confident = args.least_confident
        self.active_question = args.active_question
        self.total_budget = args.total_budget
        self.round_budget = args.round_budget
        self.session_budget = args.session_budget if args.session_budget > 0 else self.round_budget
        self.score_rule = args.score_rule
        self.checkpoint_cost = args.checkpoint_cost
        self.total_cost = 0

        # NOTE: reinitialization helps fight against online learning's bias
        self.optimize_from_scratch = args.optimize_from_scratch
        self.max_optimize_epoch = args.max_optimize_epoch
        self.min_optimize_epoch = args.min_optimize_epoch
        self.reinit_round = args.reinit_round

        # NOTE: we want to start with a small set of data that is fully pre-labeled.
        self.prelabel_ratio = args.prelabel_ratio

        if self.prelabel_ratio > 0:
            indices = np.random.permutation(self.N)
            prelabel_num = int(self.N * self.prelabel_ratio)
            prelabel_inds = indices[:prelabel_num]
            for i in prelabel_inds:
                self.train_annotation[i] = False
                self.train_annotation[i][self.train_label[i]] = True
                self.train_annotation_sum[i] = 1

            self.logger.info('Pre-labeling examples %d %d ... %d %d' %
                             (prelabel_inds[0], prelabel_inds[1],
                              prelabel_inds[-2], prelabel_inds[-1]))

            self.train_iter.append_sequence(prelabel_inds, 'optimization')
            self.logger.info(
                'Adding %d prelabeled examples into optimization pool' %
                prelabel_num)
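
A self-contained sketch of what the pre-labeling loop above does, using plain NumPy with toy sizes (the names here are illustrative, not from the original code): each row of the N-by-K boolean annotation mask starts with every class still possible, and a pre-labeled example is collapsed to a single True at its ground-truth class.

import numpy as np

N, K = 6, 4                                   # toy dataset: 6 examples, 4 classes
rng = np.random.default_rng(0)

train_label = rng.integers(0, K, size=N)      # hypothetical ground-truth labels
annotation = np.ones((N, K), dtype=bool)      # initially every class is still possible
annotation_sum = np.full(N, K, dtype=np.float64)

prelabel_inds = rng.permutation(N)[:2]        # pre-label a small random subset
for i in prelabel_inds:
    annotation[i] = False                     # rule out every class ...
    annotation[i][train_label[i]] = True      # ... except the true one
    annotation_sum[i] = 1                     # only one candidate label remains

print(annotation.astype(int))                 # rows of the pre-labeled examples are one-hot
print(annotation_sum)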