Пример #1
0
    def __init__(self, conf, default_transport, formatter, debug):
        """Creates an instance of metrics from the configuration."""
        self._ready = False
        self._token = conf.token

        # In debug mode echo metrics to stderr, wrapping the real
        # transport when one is available.
        if debug:
            inner = default_transport.get() if default_transport else None
            self._transport = StderrTransport(inner)
        else:
            self._transport = default_transport.get()
        self._formatter = formatter
        self._debug = debug

        self._timer = None
        self._shutdown = False
        self._interval = self._parse_interval(conf.interval)
        if self._interval == 0:
            report("Warning: Cannot instantiate metrics, invalid interval `%s'." % conf.interval)

        # Without psutil no system metrics can be collected; leave the
        # instance in the not-ready state.
        if not psutil_available:
            if debug:
                report("Warning: Cannot instantiate system metrics, psutil library is not available.")
            self._items = []
            return

        self._items = self._instantiate_system(conf, debug) + self._instantiate_processes(conf, debug)
        self._ready = True
Пример #2
0
 def report(self):
     """Print the monthly debt payments and, when there is more than one
     debt, draw a pie chart of the payment breakdown."""
     total = utils.sum(self.payment)
     utils.report("Debt payments (monthly)", self.payment, total)
     if len(self.payment.items()) > 1:
         names = [n for n, v in self.payment.items()]
         amounts = [v for n, v in self.payment.items()]
         utils.piechart("Payments", "Your debt payments", names, amounts)
Пример #3
0
def check_bib(options):
    '''Main driver: verify bibliography ordering, key format, and that
    citations in the sources match the defined bibliography entries.'''
    bib_keys = get_definitions(options.bibliography)
    check_order(bib_keys)
    check_keys(bib_keys)
    citations = utils.get_all_matches(utils.CITATION, options.sources)
    utils.report('bibliography', cited=citations, defined=set(bib_keys))
Пример #4
0
    def _instantiate_system(self, conf, debug):
        """Creates the system metric collectors enabled in `conf'.

        Returns an empty list (warning in debug mode) when no token is
        configured, since nothing could be submitted without one.
        """
        if not conf.token:
            if debug:
                report("Warning: Cannot instantiate system metrics, token not specified.")
            return []

        # Constructor arguments shared by every metric collector.
        common = (self._interval, self._transport, self._formatter)
        items = []

        if conf.cpu:
            if conf.cpu in ('core', 'system'):
                items.append(CpuMetrics(conf.cpu == 'core', *common))
            else:
                report("Unrecognized cpu option `%s', `core' or `system' expected" % conf.cpu)
        if conf.vcpu:
            if conf.vcpu == 'core':
                items.append(VcpuMetrics(*common))
            else:
                report("Unrecognized vcpu option `%s', `core' expected" % conf.vcpu)
        if conf.mem:
            if conf.mem == 'system':
                items.append(MemMetrics(*common))
            else:
                report("Unrecognized mem option `%s', `system' expected" % conf.mem)
        if conf.swap:
            if conf.swap == 'system':
                items.append(SwapMetrics(*common))
            else:
                report("Unrecognized swap option `%s', `system' expected" % conf.swap)
        if conf.disk:
            items.append(DiskIoMetrics(conf.disk, *common))
        if conf.space:
            items.append(DiskSpaceMetrics(conf.space, *common))
        if conf.net:
            items.append(NetMetrics(conf.net, *common))

        return items
Пример #5
0
def q4(argv=None):
    """Cross-validates the custom and sklearn logistic regressions on Digits."""
    dataset, method_name, k, latex = wrapper_args(
        argv, 'q4',
        ['Digits'],
        ['MyLogisticRegGen', 'LogisticRegression'])

    Digits_X, Digits_y = prepare_digits(want_noise=False)

    methods = {
        ('MyLogisticRegGen', 'Digits'):
        (MyLogisticRegGen(verbose=False), Digits_X, Digits_y),
        ('LogisticRegression', 'Digits'):
        (LogisticRegression(), Digits_X, Digits_y)
    }
    default_order = [
        ('MyLogisticRegGen', 'Digits'),
        ('LogisticRegression', 'Digits')
    ]

    order = default_order if dataset == 'all' else [(method_name, dataset)]

    for name, dataset in order:
        method, X, y = methods[(name, dataset)]
        print('==============')
        print('method: {}, dataset: {}'.format(name, dataset))
        scores = my_cross_val(method, X, y, k)
        report(name, dataset, scores, latex=latex)
Пример #6
0
def check_links(options):
    '''Main driver: compare link references in sources against the link
    definitions from the configuration, then check for duplicates.'''
    link_config = utils.read_yaml(options.config)
    known = get_keys(link_config)
    used = utils.get_all_matches(LINK_REF, options.sources)
    utils.report('links', referenced=used, defined=known)
    check_duplicates(options)
Пример #7
0
    def mid_eval(self, epoch, step, **kwargs):
        """Runs a mid-training validation pass, logs to TensorBoard, and
        decays the learning rate when the eval loss stops improving.

        kwargs must provide `steps' (updates per epoch) and
        `train_summaries' (serialized training summaries for this step).

        Fix: the original body accidentally duplicated the
        assert/sum_step/add_summary preamble, writing the same train
        summaries twice at the same global step; the duplicate (and the
        surrounding commented-out code) has been removed.
        """
        assert self.session is not None
        self.print_f('--- Start eval ---')

        # Global step shared by train and eval summaries.
        sum_step = kwargs['steps'] * epoch + step
        self.tfboard_train_writer.add_summary(kwargs['train_summaries'],
                                              sum_step)

        # Evaluate on the full validation set.
        data = self.X_val
        labels = self.y_val

        preds, metrics = self.eval(data, labels)
        print(metrics)
        loss = metrics[1]
        acc = metrics[0]
        summaries = tf.Summary()
        summaries.value.add(tag="accuracy_scalar", simple_value=acc)
        summaries.value.add(tag="loss_scalar", simple_value=loss)

        pred_val = np.argmax(preds, axis=1)
        self.true_val = np.argmax(labels, axis=1)

        utils.report(self.true_val,
                     pred_val,
                     self.label_set,
                     epoch=sum_step,
                     print_fn=self.print_f,
                     loss=loss)
        self.tfboard_test_writer.add_summary(summaries, sum_step)

        # Learning-rate schedule: reset the patience counter whenever the
        # eval loss improves, otherwise count down and decay on exhaustion.
        if loss <= self.eval_loss_max:
            self.eval_loss_max = loss
            self.eval_loss_max_count = self.learning_rate_tolerance
        else:
            self.eval_loss_max_count -= 1

        if self.eval_loss_max_count == 0:
            self.eval_loss_max_count = self.learning_rate_tolerance
            self.learning_rate *= self.learning_rate_decay
            self.print_f('### update learning rate to {}'.format(
                self.learning_rate))

        self.print_f('--- Finish eval ----')
Пример #8
0
def q3(argv=None):
    """Cross-validates MultiGaussClassify (full and diagonal covariance)
    against LogisticRegression on Boston50, Boston75 and Digits.

    Fixes: the Boston75 MultiGaussClassify entries previously passed
    `x_boston_50.shape[1]' as the feature count, and the final report()
    hard-coded latex=True, ignoring the flag parsed from the CLI.
    """
    dataset, method_name, k, latex = wrapper_args(
        argv, 'q3', ['Boston50', 'Boston75', 'Digits'])

    x_boston, y_boston = load_dataset(load_boston)
    x_digits, y_digits = prepare_digits(True)
    x_boston_50, y_boston_50 = percentileAssignment(50, x_boston, y_boston)
    x_boston_75, y_boston_75 = percentileAssignment(75, x_boston, y_boston)

    default_order = [('MultiGaussClassify_WithFullMatrix', 'Boston50'),
                     ('MultiGaussClassify_WithFullMatrix', 'Boston75'),
                     ('MultiGaussClassify_WithFullMatrix', 'Digits'),
                     ('MultiGaussClassify_WithDiagonal', 'Boston50'),
                     ('MultiGaussClassify_WithDiagonal', 'Boston75'),
                     ('MultiGaussClassify_WithDiagonal', 'Digits'),
                     ('LogisticRegression', 'Boston50'),
                     ('LogisticRegression', 'Boston75'),
                     ('LogisticRegression', 'Digits')]

    methods = {
        ('MultiGaussClassify_WithFullMatrix', 'Boston50'):
        (MultiGaussClassify(len(np.unique(y_boston_50)),
                            x_boston_50.shape[1]), x_boston_50, y_boston_50),
        ('MultiGaussClassify_WithFullMatrix', 'Boston75'):
        # was x_boston_50.shape[1] -- use the Boston75 feature matrix
        (MultiGaussClassify(len(np.unique(y_boston_75)),
                            x_boston_75.shape[1]), x_boston_75, y_boston_75),
        ('MultiGaussClassify_WithFullMatrix', 'Digits'):
        (MultiGaussClassify(len(np.unique(y_digits)),
                            x_digits.shape[1]), x_digits, y_digits),
        ('MultiGaussClassify_WithDiagonal', 'Boston50'):
        (MultiGaussClassify(len(np.unique(y_boston_50)), x_boston_50.shape[1],
                            True), x_boston_50, y_boston_50),
        ('MultiGaussClassify_WithDiagonal', 'Boston75'):
        # was x_boston_50.shape[1] -- use the Boston75 feature matrix
        (MultiGaussClassify(len(np.unique(y_boston_75)), x_boston_75.shape[1],
                            True), x_boston_75, y_boston_75),
        ('MultiGaussClassify_WithDiagonal', 'Digits'):
        (MultiGaussClassify(len(np.unique(y_digits)), x_digits.shape[1],
                            True), x_digits, y_digits),
        ('LogisticRegression', 'Boston50'): (LogisticRegression(), x_boston_50,
                                             y_boston_50),
        ('LogisticRegression', 'Boston75'): (LogisticRegression(), x_boston_75,
                                             y_boston_75),
        ('LogisticRegression', 'Digits'): (LogisticRegression(), x_digits,
                                           y_digits)
    }

    if dataset == 'all':
        order = default_order
    else:
        order = [(method_name, dataset)]

    for key in order:
        name, dataset = key
        method, X, y = methods[key]
        print('==============')
        print('method: {}, dataset: {}'.format(key[0], key[1]))
        scores = my_cross_val(method, X, y, k)
        # honor the CLI latex flag instead of hard-coding True
        report(name, dataset, scores, latex=latex)
Пример #9
0
	def project(self, rate, years):
		"""Projects income `years' years forward at `rate' and reports the
		tax due on the projected income, reusing this object's credits.

		Fix: removed the dead `d = dict()' that was immediately overwritten
		and the unused `fv' binding.
		"""
		d = self.income
		# utils.project appears to update `d' in place with projected values
		# (its return value was stored but never used); the call is kept for
		# that side effect. TODO confirm against utils.project.
		utils.project(d, rate, years)
		t = Tax(d)
		t.credits = self.credits
		t.calculate()
		utils.report("Projected Tax", t.credits, t.total)
Пример #10
0
def check_spelling(options):
    '''Main driver: compare words on stdin against the expected word list.

    Reads actual words (one per line) from standard input and expected
    words from the file named by options.compare, then reports the
    difference between the two sets.

    Fix: the comparison file was opened without ever being closed; it is
    now read inside a `with' block.
    '''
    actual = {word.strip() for word in sys.stdin.readlines()}
    with open(options.compare, 'r') as reader:
        expected = {word.strip() for word in reader}
    utils.report('spelling', actual=actual, expected=expected)
Пример #11
0
def check_gloss(options):
    '''Main driver: validate glossary key format and ordering, then check
    that references and definitions agree.'''
    glossary = utils.read_yaml(options.glossary)
    check_keys(glossary, options.language)
    check_order(glossary, options.language)
    gloss_defined = get_definitions(glossary)
    # References come both from the sources and from cross-references
    # inside the glossary itself.
    refs = utils.get_all_matches(utils.GLOSS_REF, options.sources, no_duplicates=True)
    refs |= get_internal(glossary, options.language)
    utils.report('glossary', referenced=refs, defined=gloss_defined)
Пример #12
0
def q3ii(argv=None):
    """Train/test-split comparison of SVMs and logistic regression on the
    Boston percentile splits and Digits."""
    dataset, method_name, k, pi, latex = wrapper_args(
        argv, 'q3ii', ['Boston50', 'Boston75', 'Digits'], include_pi=True)

    x_boston, y_boston = load_dataset(load_boston)
    x_digits, y_digits = load_dataset(load_digits)
    x_boston_50, y_boston_50 = percentileAssignment(50, x_boston, y_boston)
    x_boston_75, y_boston_75 = percentileAssignment(75, x_boston, y_boston)

    def make_lr():
        # Multinomial logistic regression with enough iterations to converge.
        return LogisticRegression(penalty='l2',
                                  solver='lbfgs',
                                  multi_class='multinomial',
                                  max_iter=5000)

    data = {'Boston50': (x_boston_50, y_boston_50),
            'Boston75': (x_boston_75, y_boston_75),
            'Digits': (x_digits, y_digits)}
    makers = {'LinearSVC': lambda: LinearSVC(max_iter=2000),
              'SVC': lambda: SVC(gamma='scale', C=10),
              'LogisticRegression': make_lr}

    default_order = [(m, d)
                     for m in ('LinearSVC', 'SVC', 'LogisticRegression')
                     for d in ('Boston50', 'Boston75', 'Digits')]

    if dataset == 'all':
        order = default_order
    else:
        order = [(method_name, dataset)]

    for name, dataset in order:
        X, y = data[dataset]
        method = makers[name]()
        print('==============')
        print('method: {}, dataset: {}'.format(name, dataset))
        scores = my_train_test(method, X, y, 0.75, k)
        report(name, dataset, scores, latex=latex)
Пример #13
0
 def check_marks(self, lineno):
     """Verify that no map cell holds more than 9 marks at program end.

     Returns 0 on success; reports an error and returns -1 when any
     non-wall cell ('#') exceeds 9 marks.

     Fix: the error message was truncated mid-sentence ("...in a ").
     """
     for row in self._worldmap._map:
         for cell in row:
             # walls are '#'; every other cell stores a mark count
             if cell != '#' and int(cell) > 9:
                 utils.report(
                     self._instruction_file, lineno,
                     "Program ended with more than 9 marks in a cell")
                 return -1
     return 0
Пример #14
0
 def project(self, years):
     """Project every asset forward `years' years at its own rate.

     self.data maps asset name -> (value, rate).  Reports the projected
     assets with their combined future value and returns the per-asset
     projection dict.
     """
     projected = dict()
     total_fv = 0.0
     for name, entry in self.data.items():
         single = dict()
         (value, rate) = entry
         single[name] = value
         total_fv += utils.project(single, rate, years)
         # Read back from `single' -- utils.project may have updated it
         # in place.
         projected[name] = single[name]
     utils.report("Projected Assets", projected, total_fv)
     return projected
Пример #15
0
 def _render_once(self, data):
     """Regenerate the nginx config from a lainlet snapshot, reload nginx,
     and emit a reload counter datapoint."""
     apps = get_apps_from_lainlet(data)
     logger.info('>>>>>>>>>> render start <<<<<<<<<<')
     logger.debug('NginxConf start')
     conf = NginxConf(apps, self._ssl, self._extra_domains)
     conf.render_to_buffer()
     conf.sync_from_buffer()
     logger.debug('NginxConf finish')
     logger.info('>>>>>>>>>> render finish <<<<<<<<<<')
     self._reload_nginx()
     metric_key = "%s.webrouter.reload.%s.count" % (KEY_PREFIX, INSTANCE_NO)
     report(metric_key, 1, int(time.time()))
Пример #16
0
    def _collect_metrics(self):
        """Collects a sample from every metric item, swallowing errors.

        Collection failures (typically `permission denied' on hardened
        systems) must not kill the collection loop, so every exception is
        caught; in debug mode the error and traceback are reported.

        Fixes: the Python-2-only `except Exception, e' syntax is replaced
        with `as e' (valid on 2.6+ and 3); the unused local `ethalon'
        (a never-read time.time() snapshot) is removed.
        """
        for x in self._items:
            try:
                x.collect()
            except Exception as e:
                # Make sure we don't propagate any unexpected exceptions
                # Typically `permission denied' on hard-ended systems
                if self._debug:
                    report("Warning: `%s'" % e)
                    report(''.join(traceback.format_tb(sys.exc_info()[2])))
Пример #17
0
    def _instantiate_processes(self, conf, debug):
        """Creates per-process metric collectors from conf.processes.

        Entries are indexed name=process[0], token=process[2] (process[1]
        is passed through to ProcMetrics); entries without a token cannot
        submit data and are skipped, with a warning in debug mode.
        """
        items = []
        for process in conf.processes:
            name, token = process[0], process[2]
            if token:
                items.append(ProcMetrics(name, process[1], token, self._interval, self._transport, self._formatter))
            elif debug:
                report("Warning: Cannot instantiate metrics for `%s', token not specified." % name)

        return items
Пример #18
0
    def _collect_metrics(self):
        """Collects a sample from every metric item, swallowing errors.

        Collection failures (typically `permission denied' on hardened
        systems) must not kill the collection loop, so every exception is
        caught; in debug mode the error and traceback are reported.

        Fixes: the Python-2-only `except Exception, e' syntax is replaced
        with `as e' (valid on 2.6+ and 3); the unused local `ethalon'
        (a never-read time.time() snapshot) is removed.
        """
        for x in self._items:
            try:
                x.collect()
            except Exception as e:
                # Make sure we don't propagate any unexpected exceptions
                # Typically `permission denied' on hard-ended systems
                if self._debug:
                    report("Warning: `%s'" % e)
                    report(''.join(traceback.format_tb(sys.exc_info()[2])))
Пример #19
0
 def _render_once(self, data):
     """Render the nginx configuration for the apps reported by lainlet,
     reload nginx, then report one reload-count datapoint."""
     webrouter_apps = get_apps_from_lainlet(data)
     logger.info('>>>>>>>>>> render start <<<<<<<<<<')
     logger.debug('NginxConf start')
     renderer = NginxConf(webrouter_apps, self._ssl, self._extra_domains)
     renderer.render_to_buffer()
     renderer.sync_from_buffer()
     logger.debug('NginxConf finish')
     logger.info('>>>>>>>>>> render finish <<<<<<<<<<')
     self._reload_nginx()
     key = "%s.webrouter.reload.%s.count" % (KEY_PREFIX, INSTANCE_NO)
     report(key, 1, int(time.time()))
Пример #20
0
def validate_nginx_config():
    """Periodically runs `nginx -t' and reports config validity, forever.

    Every 10 seconds the syntax check result (1 = valid, 0 = invalid or
    nginx not runnable) is reported under the webrouter syntax_valid key.

    Fix: the bare `except:' also swallowed KeyboardInterrupt/SystemExit;
    it is narrowed to the failures that actually mean "invalid config".
    """
    step = 10
    key = "%s.webrouter.syntax_valid.%s" % (KEY_PREFIX, INSTANCE_NO)
    with open(os.devnull, 'w') as out:
        while True:
            result = 1
            try:
                subprocess.check_call(["nginx", "-t"], stdout=out, stderr=out)
            except (subprocess.CalledProcessError, OSError):
                # nginx exited non-zero or could not be executed at all
                result = 0
            report(key, result, int(time.time()))
            time.sleep(step)
Пример #21
0
def q3i(argv=None):
    """Cross-validates LinearSVC, SVC and LogisticRegression (all with
    default parameters) on Boston50, Boston75 and Digits."""
    dataset, method_name, k, latex = wrapper_args(
        argv, 'q3i', ['Boston50', 'Boston75', 'Digits'])

    data = {
        'Boston50': prepare_boston50(),
        'Boston75': prepare_boston75(),
        'Digits': prepare_digits(),
    }
    classifiers = {
        'LinearSVC': LinearSVC,
        'SVC': SVC,
        'LogisticRegression': LogisticRegression,
    }

    default_order = [(m, d)
                     for m in ('LinearSVC', 'SVC', 'LogisticRegression')
                     for d in ('Boston50', 'Boston75', 'Digits')]

    order = default_order if dataset == 'all' else [(method_name, dataset)]

    for name, dataset in order:
        X, y = data[dataset]
        method = classifiers[name]()
        print('==============')
        print('method: {}, dataset: {}'.format(name, dataset))
        scores = my_cross_val(method, X, y, k)
        report(name, dataset, scores, latex=latex)
Пример #22
0
 def report(self):
     """Summarize the monthly budget and chart its allocation.

     Warns when spending exceeds income, otherwise notes the investable
     surplus; the pie chart shows every non-income category plus the
     surplus itself.
     """
     utils.report("Budget (monthly)", self.data, self.surplus)
     if self.surplus < 0.0:
         output.warn(
             "Spending exceeds available income by %.2f. Please readjust." %
             -(self.surplus))
     else:
         output.info("You have surplus %.2f to invest." % (self.surplus))
     labels = [n for n, v in self.data.items() if n != "income"]
     amounts = [abs(v) for n, v in self.data.items() if n != "income"]
     labels.append("surplus")
     amounts.append(self.surplus)
     utils.piechart("Budget", "Budget Allocation", labels, amounts)
Пример #23
0
def run_training(
        dataset,
        val_dataset,
        extra_val_dataset=None,
        n_updates=3000,
        lambda_final=1.,
        lambda_steps=2000,
        batch_size=100):
    """Trains a DANN on `dataset' and evaluates it on the validation sets.

    Returns a dict with train_info, val_info and extra_val_info (the last
    is None when no extra validation dataset is supplied).
    """
    # Derive the epoch count from the requested number of gradient
    # updates, using the larger of the source/target sample counts.
    n_src_samples, input_dim = dataset.xs.shape
    n_tgt_samples, _ = dataset.xt.shape
    n_samples = max(n_src_samples, n_tgt_samples)
    updates_per_epoch = (n_samples / batch_size)
    n_epochs = int(n_updates // updates_per_epoch)

    # Fresh TF graph and session for this run.
    tf.reset_default_graph()
    sess = tf.InteractiveSession()

    model = DANN(
        input_dim=input_dim,
        output_dim=2,
        lambda_final=lambda_final,
        lambda_steps=lambda_steps,
        dropout_keep_prob=.5,
        encoder_hidden_layer_dims=(256, 128, 64, 64),
        classifier_hidden_layer_dims=()
    )
    sess.run(tf.global_variables_initializer())

    model.train(dataset, n_epochs=n_epochs)

    # Evaluate on train, validation and (optionally) extra validation data.
    train_info = utils.evaluate(model, dataset)
    val_info = utils.evaluate(model, val_dataset)
    extra_val_info = (utils.evaluate(model, extra_val_dataset)
                      if extra_val_dataset is not None else None)

    utils.report(train_info, val_info, extra_val_info)

    return dict(train_info=train_info, val_info=val_info, extra_val_info=extra_val_info)
Пример #24
0
    def _instantiate(self, conf):
        """Builds every metric collector enabled in the configuration:
        system metrics (cpu/vcpu/mem/swap/disk/space/net) plus one
        ProcMetrics per configured process entry."""
        # Constructor arguments shared by every metric collector.
        common = (self._interval, self._transport, self._formatter)
        items = []

        if conf.cpu:
            if conf.cpu in ('core', 'system'):
                items.append(CpuMetrics(conf.cpu == 'core', *common))
            else:
                report("Unrecognized cpu option `%s', `core' or `system' expected" % conf.cpu)
        if conf.vcpu:
            if conf.vcpu == 'core':
                items.append(VcpuMetrics(*common))
            else:
                report("Unrecognized vcpu option `%s', `core' expected" % conf.vcpu)
        if conf.mem:
            if conf.mem == 'system':
                items.append(MemMetrics(*common))
            else:
                report("Unrecognized mem option `%s', `system' expected" % conf.mem)
        if conf.swap:
            if conf.swap == 'system':
                items.append(SwapMetrics(*common))
            else:
                report("Unrecognized swap option `%s', `system' expected" % conf.swap)
        if conf.disk:
            items.append(DiskIoMetrics(conf.disk, *common))
        if conf.space:
            items.append(DiskSpaceMetrics(conf.space, *common))
        if conf.net:
            items.append(NetMetrics(conf.net, *common))

        for process in conf.processes:
            items.append(ProcMetrics(process[0], process[1], process[2], *common))

        return items
Пример #25
0
    def on_epoch_end(self, epoch, **kwargs):
        """End-of-epoch hook: every `eval_freq' epochs (default 4), or on
        the final epoch (default 15), report validation predictions to the
        console and TensorBoard.
        """
        assert self.session is not None
        if epoch > 0 and (epoch % kwargs.get('eval_freq', 4) == 0
                          or epoch == kwargs.get('epochs', 15)):
            # NOTE(review): the eval call below is commented out, leaving
            # `preds' and `summaries' undefined -- as written this branch
            # raises NameError.  Restore the eval call (and confirm its
            # current signature) before relying on this hook.
            # preds, loss, summaries = self.eval(self.X_val)
            pred_val = np.argmax(preds, axis=1)

            # cache the ground-truth labels on first use
            if self.true_val is None:
                self.true_val = np.array(np.argmax(self.y_val, axis=1))
            utils.report(self.true_val,
                         pred_val,
                         self.label_set,
                         print_fn=self.print_f)
            self.tfboard_test_writer.add_summary(summaries, epoch)
Пример #26
0
def check_world_dims(width, height, file):
    """Validate and convert world dimensions to positive integers.

    Returns (width, height) as ints on success, or (None, None) after
    reporting an error when the values are non-numeric or not positive.

    Fixes: the bare `except' is narrowed to conversion errors, and the
    error message ("cannot be less than 0") now matches the `<= 0' check,
    which also rejects zero.
    """
    try:
        width = int(width)
        height = int(height)
    except (TypeError, ValueError):
        utils.report(file, 1, "World dimensions should be integers")
        return None, None

    if width <= 0 or height <= 0:
        utils.report(file, 1, "World dimensions must be positive")
        return None, None

    return width, height
Пример #27
0
def q3(argv=None):
    """Cross-validates MultiGaussClassify against LogisticRegression on
    Boston50, Boston75 and Digits."""
    dataset, method_name, k, latex = wrapper_args(
        argv, 'q3', ['Boston50', 'Boston75', 'Digits'])

    # Note that prepare_digits adds gaussian noise to the data to avoid
    # singular covariance matrices (see datasets.prepare_digits).
    data = {
        'Boston50': prepare_boston50(),
        'Boston75': prepare_boston75(),
        'Digits': prepare_digits(),
    }

    makers = {
        # Digits uses the non-linear (quadratic) variant of the Gaussian
        # classifier; the Boston splits use the default.
        'MultiGaussClassify':
        lambda ds: MultiGaussClassify(linear=False) if ds == 'Digits'
        else MultiGaussClassify(),
        'LogisticRegression': lambda ds: LogisticRegression(),
    }

    default_order = [(m, d)
                     for m in ('MultiGaussClassify', 'LogisticRegression')
                     for d in ('Boston50', 'Boston75', 'Digits')]

    order = default_order if dataset == 'all' else [(method_name, dataset)]

    for name, dataset in order:
        X, y = data[dataset]
        method = makers[name](dataset)
        print('==============')
        print('method: {}, dataset: {}'.format(name, dataset))
        scores = my_cross_val(method, X, y, k)
        report(name, dataset, scores, latex=latex)
Пример #28
0
def q4(argv):
    """Cross-validates classifiers on projected versions of the digits data.

    X1 is a random projection to 32 dimensions; X2 is a quadratic feature
    expansion.  NOTE(review): 'X3' is accepted by wrapper_args but never
    built -- selecting it raises KeyError below; confirm whether X3 was
    dropped intentionally.
    """
    x_digits, y_digits = load_dataset(load_digits)

    dataset, method_name, k, latex = wrapper_args(argv, 'q4',
                                                  ['X1', 'X2', 'X3'])

    feature_sets = {
        'X1': rand_proj(x_digits, 32),
        'X2': quad_proj(x_digits),
    }
    makers = {
        'LinearSVC': lambda: LinearSVC(max_iter=2000),
        'SVC': lambda: SVC(gamma='scale', C=10),
        'LogisticRegression': lambda: LogisticRegression(penalty='l2',
                                                         solver='lbfgs',
                                                         multi_class='multinomial',
                                                         max_iter=5000),
    }

    default_order = [(m, d)
                     for m in ('LinearSVC', 'SVC', 'LogisticRegression')
                     for d in ('X1', 'X2')]

    order = default_order if dataset == 'all' else [(method_name, dataset)]

    for name, dataset in order:
        X = feature_sets[dataset]
        method = makers[name]()
        print('==============')
        print('method: {}, dataset: {}'.format(name, dataset))
        scores = my_cross_val(method, X, y_digits, k)
        report(name, dataset, scores, latex=latex)
Пример #29
0
 def get_volume(self, volume_uri, verbose=False):
     """Fetch a volume by URI from the API and return the decoded JSON body.

     Raises through handle_error_codes on HTTP error responses.
     """
     url = self.host + "/volumes/%s" % volume_uri
     r = requests.get(url, verify=self.verify_ssl)
     if verbose:
         print(utils.report("GET /volume/%s" % volume_uri, r))
     self.handle_error_codes(r)
     return json.loads(r.content)
Пример #30
0
 def get_metadata(self, doi, verbose=False):
     """Fetch article metadata for `doi' and return the decoded JSON body.

     Raises through handle_error_codes on HTTP error responses.
     """
     url = self.host + '/articles/' + doi
     r = requests.get(url, verify=self.verify_ssl)
     if verbose:
         print(utils.report("GET /articles/%s" % doi, r))
     self.handle_error_codes(r)
     return json.loads(r.content)
Пример #31
0
 def read_journal(self, journal_key, verbose=False):
     """Fetch a journal record by key and return the decoded JSON body.

     Raises through handle_error_codes on HTTP error responses.
     """
     url = self.host + "/journals/%s" % journal_key
     r = requests.get(url, verify=self.verify_ssl)
     if verbose:
         print(utils.report("GET /journals/%s" % journal_key, r))
     self.handle_error_codes(r)
     return json.loads(r.content)
Пример #32
0
    def _base_publish(self,
                      doi,
                      publish=True,
                      syndicate_all=False,
                      verbose=False):
        """Transition an article to 'published' (or back to 'ingested').

        When syndicate_all is set, also marks CROSSREF, PMC and PUBMED
        syndication as IN_PROGRESS.  Returns the decoded JSON response.
        """
        state = 'published' if publish else 'ingested'
        payload = {'state': state}

        if syndicate_all:
            payload['syndications'] = {
                target: {'status': 'IN_PROGRESS'}
                for target in ('CROSSREF', 'PMC', 'PUBMED')
            }

        r = requests.patch(self.host + '/articles/%s' % doi,
                           data=json.dumps(payload),
                           verify=self.verify_ssl)
        if verbose:
            print(utils.report("PATCH /articles/%s" % doi, r))
        self.handle_error_codes(r)
        return json.loads(r.content)
Пример #33
0
def freestyle(loc):  # TODO
    """Generate sample text from the latest checkpoint of a trained model.

    Loads the pickled training settings from *loc*, rebuilds the
    vocabulary/embedding, restores the most recent checkpoint, and writes
    generated samples (a seed-prompt x temperature sweep) to
    evaluate_stream.txt.

    :param loc: directory containing settings.pkl and a checkpoints/ folder
    """
    # Load the training settings saved next to the checkpoints.
    # FIX: close the pickle file deterministically instead of leaking it.
    model_dir = Path(loc)
    with open(model_dir / 'settings.pkl', 'rb') as fh:
        settings = pickle.load(fh)
    print(settings)

    # settings
    cell = settings['cell']
    hidden_size = settings['hidden_size']
    token = settings['token']
    small = settings['small']
    how_many = 100

    # Rebuild vocabulary/embedding so model layer sizes match training.
    vocab = generate.get_vocab(token, small)
    if token == 'word':
        emb = generate.get_embedding('word2vec')
        input_size = emb.vectors.shape[1]
        output_size = emb.vectors.shape[0]
    elif token == 'character':
        emb = None
        input_size = vocab.size
        output_size = vocab.size
    # BUG FIX: os.listdir returns entries in arbitrary order, so [-1] was a
    # random checkpoint. Sort so the lexicographically last (i.e. latest,
    # assuming zero-padded names) checkpoint is chosen deterministically.
    fnames = sorted(os.listdir(model_dir / 'checkpoints'))
    fname = fnames[-1]

    # load the model
    model = LanguageModel(cell, input_size, hidden_size, output_size)
    model.load_state_dict(torch.load(model_dir / 'checkpoints' / fname))
    model.eval()

    # Seed prompts and the temperature sweep [0.01, 0.11, ..., 1.01].
    sents = [
        'The Standard ', 'non-abelian', 'silicon pixel detector',
        'estimate the', '[23] ATLAS'
    ]
    temperatures = [0.01 + 0.1 * i for i in range(11)]
    eval_stream = model_dir / 'evaluate_stream.txt'

    for temperature in temperatures:
        txt = '\nTemperature = {}'.format(temperature)
        utils.report(txt, eval_stream)
        for sent in sents:
            txt = generate.compose(model, vocab, emb, sent, temperature,
                                   how_many)
            utils.report(txt, eval_stream)
Пример #34
0
 def ingestibles(self, verbose=False):
     '''
     returns list of ingestible DOIs as unicode
     '''
     r = requests.get(self.host + '/ingestibles/', verify=self.verify_ssl)
     if verbose:
         print(utils.report("GET /ingestibles/", r))
     # CONSISTENCY FIX: every sibling GET helper checks the response status
     # before decoding; previously an HTTP error body was json-parsed blindly.
     self.handle_error_codes(r)
     return json.loads(r.content)
Пример #35
0
 def ingestibles(self, verbose=False):
     '''
     returns list of ingestible DOIs as unicode
     '''
     r = requests.get(self.host + '/ingestibles/', verify=self.verify_ssl)
     if verbose:
         print(utils.report("GET /ingestibles/", r))
     # CONSISTENCY FIX: every sibling GET helper checks the response status
     # before decoding; previously an HTTP error body was json-parsed blindly.
     self.handle_error_codes(r)
     return json.loads(r.content)
Пример #36
0
 def create_volume(self, journal_key, volume_uri, display_name, image_uri, verbose=False):
     """Create a new volume under *journal_key*; returns the raw response body."""
     endpoint = "/journals/%s" % journal_key
     body = json.dumps({
         'volumeUri': volume_uri,
         'displayName': display_name,
         'imageUri': image_uri,
     })
     response = requests.post(self.host + endpoint, data=body, verify=self.verify_ssl)
     if verbose:
         print(utils.report("POST " + endpoint, response))
     self.handle_error_codes(response)
     return response.content
Пример #37
0
 def create_issue(self, volume_uri, issue_uri, display_name, image_uri, verbose=False):
     """Create a new issue under *volume_uri*; returns the raw response body."""
     endpoint = "/volumes/%s" % volume_uri
     body = json.dumps({
         'issueUri': issue_uri,
         'displayName': display_name,
         'imageUri': image_uri,
         'respectOrder': True,
     })
     response = requests.post(self.host + endpoint, data=body, verify=self.verify_ssl)
     if verbose:
         print(utils.report("POST " + endpoint, response))
     self.handle_error_codes(response)
     return response.content
Пример #38
0
    def __init__(self, conf, default_transport, formatter, debug):
        """Creates an instance of metrics from the configuration.

        Leaves self._ready False (metrics disabled) when psutil is missing,
        no token is configured, or the configured interval is invalid.
        """
        self._ready = False
        if not psutil_available:
            if debug:
                report("Warning: Cannot instantiate metrics, psutil library is not available.")
            return
        if not conf.token:
            if debug:
                report("Warning: Cannot instantiate metrics, token not specified.")
            return

        # In debug mode mirror emitted metrics to stderr, wrapping the real
        # transport when one is available.
        if debug and not default_transport:
            self._transport = StderrTransport(None)
        elif debug:
            self._transport = StderrTransport(default_transport.get())
        else:
            self._transport = default_transport.get()
        self._formatter = formatter
        self._debug = debug

        self._timer = None
        self._shutdown = False
        self._interval = self._parse_interval(conf.interval)
        if self._interval == 0:
            report("Warning: Cannot instantiate metrics, invalid interval `%s'." % conf.interval)
            # BUG FIX: previously fell through and marked the instance ready
            # despite warning it cannot be instantiated; bail out so _ready
            # stays False.
            return

        self._items = self._instantiate(conf)
        self._ready = True
Пример #39
0
 def modify_issue(self, issue_uri, display_name, image_uri, article_order, verbose=False):
     """Update an issue's display name, image and article ordering.

     :return: raw response body (bytes/str, not decoded JSON)
     """
     payload = {
         'respectOrder': True,
         'issueUri': issue_uri,
         'displayName': display_name,
         'imageUri': image_uri,
         'articleOrder': article_order,
     }
     r = requests.patch(self.host + "/issues/%s" % issue_uri, data=json.dumps(payload), verify=self.verify_ssl)
     if verbose:
         # BUG FIX: the request is a PATCH, but the log line claimed POST.
         print(utils.report("PATCH /issues/%s" % issue_uri, r))
     self.handle_error_codes(r)
     return r.content
Пример #40
0
    def ingest(self, doi, force_reingest=None, verbose=False):
        '''
        attempts to ingest ingestible article by DOI
        returns article metadata dict if successful
        '''
        # Form-encoded payload (intentionally not JSON for this endpoint).
        payload = {'name': doi}
        if force_reingest:
            payload['force_reingest'] = True
        response = requests.post(self.host + '/ingestibles',
                                 data=payload,
                                 verify=self.verify_ssl)
        if verbose:
            print(utils.report("POST /ingestibles/ %s" % pretty_dict_repr(payload), response))

        self.handle_error_codes(response)
        return response.content
Пример #41
0
    def _base_publish(self, doi, publish=True, syndicate_all=False, verbose=False):
        """PATCH an article's state ('published' or 'ingested'), optionally
        flagging all syndication targets as IN_PROGRESS; returns decoded JSON."""
        if publish:
            new_state = 'published'
        else:
            new_state = 'ingested'
        payload = {'state': new_state}

        if syndicate_all:
            in_progress = {'status': 'IN_PROGRESS'}
            payload['syndications'] = {
                'CROSSREF': dict(in_progress),
                'PMC': dict(in_progress),
                'PUBMED': dict(in_progress),
            }

        response = requests.patch(self.host + '/articles/%s' % doi,
                                  data=json.dumps(payload),
                                  verify=self.verify_ssl)
        if verbose:
            print(utils.report("PATCH /articles/%s" % doi, response))
        self.handle_error_codes(response)
        return json.loads(response.content)
def run_patch_es6_modules_command(args):
    """patches RN 0.9 to support multiline imports

    No-ops (with a report) when the react-native version is unsupported,
    the patch was already applied, or the replacement target is missing.
    """
    if react_native_version() != "0.9.0-rc":
        report("Non supported react-native version, skipping packager patching.")
        return

    patterns_file = "node_modules/react-native/packager/" + \
                    "react-packager/src/DependencyResolver/replacePatterns.js"

    # FIX: use context managers so the file handles are closed
    # deterministically (the originals leaked open handles until GC).
    with open(patterns_file) as fh:
        content = fh.read()
    if AFTER_PATCH_VERSION in content:
        report("Your version was already patched, skipping packager patching.")
        return

    if BEFORE_PATCH_VERSION not in content:
        report("Can't find replacement target, skipping packager patching.")
        return

    content = content.replace(BEFORE_PATCH_VERSION, AFTER_PATCH_VERSION)
    with open(patterns_file, 'w') as fh:
        fh.write(content)
    report_success("Successfuly patched packager.")
Пример #43
0
 def read_journal(self, journal_key, verbose=False):
     r = requests.get(self.host + "/journals/%s" % journal_key, verify=self.verify_ssl)
     if verbose:
         print(utils.report("GET /journals/%s" % journal_key, r))
     self.handle_error_codes(r)
     return json.loads(r.content)
Пример #44
0
    for i in xrange(NTRIALS):
      reset(networks)
      for j in xrange(NTIME_STEPS):
        loc = randint(0, NSERVERS - 1)
        pkt = Packet('id-%d' % (j), ptime)
        for sim_obj in networks:
          network = sim_obj['obj']
          network.step()
          network.add(pkt, loc)
          sim_obj['dat'][ptime][i] += network.latency()

  # Count and report win frequencies
  for ptime in PTIMES:
    win_counts = {strategy : 0 for strategy in STRATEGIES}
    count_wins(networks, ptime, win_counts, NTRIALS)
    report(win_counts, ptime, NTRIALS)
    for strategy, count in win_counts.iteritems():
      win_freqs[strategy].append(count / float(NTRIALS))

  # Plot simulation resuslts
  plt.plot(PTIMES, win_freqs[STRATEGIES[0]], 'bs-', label=STRATEGIES[0])
  plt.plot(PTIMES, win_freqs[STRATEGIES[1]], 'gs-', label=STRATEGIES[1])
  plt.plot(PTIMES, win_freqs[STRATEGIES[2]], 'ys-', label=STRATEGIES[2])
  plt.plot(PTIMES, win_freqs[STRATEGIES[3]], 'rs-', label=STRATEGIES[3])
  plt.plot(PTIMES, win_freqs[STRATEGIES[4]], 'ms-', label=STRATEGIES[4])
  plt.title('Percent of Trials as Best Strategy vs. Processing Time')
  plt.xlabel('Processing Time')
  plt.ylabel('Percent of Trials as Best Strategy')
  plt.legend(loc='center right')
  plt.show()
def run_init_command(args):
    """Initialize the development environment.

    Sequentially: installs CocoaPods (if missing), verifies brew, updates
    brew, installs iojs/watchman/flow, installs global and project npm
    packages, applies the packager patch, installs pods, and opens the
    Xcode workspace. Aborts early if brew is not installed.
    Python 2 only (uses print statements).
    """
    report("Setting up HackApp development env", icon=Icons.lollipop, section=True)

    report("Starting setup. This is going to take a while, go grab a coffee", color="magenta", section=True,
           icon=Icons.coffee)
    print ""
    sleep(3)

    # CocoaPods requires sudo because gems install system-wide.
    if not has_command("pod"):
        report("Setting up CocoaPods (we may need your password for this)", section=True)
        run_command("sudo gem install cocoapods --no-ri --no-rdoc")
        report_success("Installed cocoapods!")

    report("Testing for brew", section=True)
    if not has_command("brew"):
        report("Please install brew to setup the env (go to http://brew.sh/)", color="red")
        return

    report_success("Found brew!")

    report("Updating brew packages", section=True)
    run_command("brew update")

    # iojs replaces node: unlink node first, then force-link iojs.
    report("Installing iojs", section=True)
    run_command("brew unlink node")
    run_command("brew install iojs")
    run_command("brew link iojs --force")
    run_command("brew install watchman")
    run_command("brew install flow")
    report_success("Installed iojs")

    report("Instaling global npm packages", section=True)
    run_command("npm install -g react-native-cli gulp")

    report("Instaling project npm packages (this may take a while)", section=True)
    run_command("npm install", hide_output=True)
    report_success("Installed all node dependencies!")
    # Patch must run after npm install so the packager sources exist.
    run_patch_es6_modules_command(args)

    report("Updating pods")
    run_command("pod install")

    report_success("All done!")
    run_command('open HackApp.xcworkspace')
Пример #46
0
    param_grid=param_grid,
    verbose=3,
    scoring='average_precision',
    cv=StratifiedShuffleSplit(Y_train,
        n_iter=10,
        test_size=0.2,
        train_size=None,
        indices=None, 
        random_state=seed,
        n_iterations=None)).fit(X_train, Y_train)

# Score the results
###print result
print("Best score: %0.3f" % grid_search.best_score_)
print(grid_search.best_estimator_)
report(grid_search.grid_scores_)
 
print('-----grid search end------------')
print ('on all train set')
scores = cross_val_score(grid_search.best_estimator_,
    x_train,
    y_train,
    cv=3,
    scoring='accuracy')

print scores.mean(),scores

print ('on test set')
scores = cross_val_score(grid_search.best_estimator_,
    X_test,
    Y_test,
Пример #47
0
 def _get_state(self, doi, verbose=False):
     """Query an article's publication state; returns the decoded JSON body."""
     endpoint = '/articles/%s?state' % doi
     response = requests.get(self.host + endpoint, verify=self.verify_ssl)
     if verbose:
         print(utils.report("GET " + endpoint, response))
     self.handle_error_codes(response)
     return json.loads(response.content)
Пример #48
0
 def get_volume(self, volume_uri, verbose=False):
     """Fetch a volume resource by URI; returns the decoded JSON body."""
     r = requests.get(self.host + "/volumes/%s" % volume_uri, verify=self.verify_ssl)
     if verbose:
         # BUG FIX: log said "GET /volume/%s" but the endpoint is /volumes/.
         print(utils.report("GET /volumes/%s" % volume_uri, r))
     self.handle_error_codes(r)
     return json.loads(r.content)