Example 1
def macina(parse, target, filelst, infl, supl):
    pointsdict = {}
    for path in filelst:
        data = parse(path)
        points = extract_point(data, target)
        points = filter(lambda x: x > infl, points)
        for i, p in enumerate(points):
            l = pointsdict.get(i, [])
            l.append(p)
            pointsdict[i] = l

    stats = []

    if parse == parse_netperf:
        starts = pointsdict[0]
        ends = pointsdict[1]

        length = list(e - s for e, s in zip(ends, starts))
        print "netperf hole lengths:", length
        avg = utils.average(length)
        var = utils.variance(length)
        q1, median, q3 = utils.quartiles(length)

        stats.append((length, (avg, var, min(length), q1, median, q3, max(length))))
    else:
        for points in pointsdict.itervalues():
            print "mesh points:", points
            avg = utils.average(points)
            var = utils.variance(points)
            q1, median, q3 = utils.quartiles(points)

            stats.append((points, (avg, var, min(points), q1, median, q3, max(points))))
    return stats
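The snippet above leans on three small statistics helpers (utils.average, utils.variance, utils.quartiles) that are not shown on this page. A minimal sketch of what such helpers could look like, offered purely as an assumption for readability rather than the project's actual utils module:

# Hypothetical stand-ins for utils.average / utils.variance / utils.quartiles
def average(values):
    values = list(values)
    return sum(values) / float(len(values))

def variance(values):
    values = list(values)
    mean = average(values)
    return sum((v - mean) ** 2 for v in values) / float(len(values))

def quartiles(values):
    # returns (Q1, median, Q3) using simple linear interpolation
    s = sorted(values)
    def percentile(p):
        k = (len(s) - 1) * p
        lo = int(k)
        hi = min(lo + 1, len(s) - 1)
        return s[lo] + (s[hi] - s[lo]) * (k - lo)
    return percentile(0.25), percentile(0.5), percentile(0.75)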
Example 2
    def shortest_path(self, parameters=None):
        print "measuring sp"  # weight-based distances still missing; needs to be modified
        measure = []
        measure2 = []
        measure3 = []
        network_size = self.network.vcount()
        new_weights = inverse_weights(self.network.es['weight'])
        weight = new_weights[0]
        weight2 = new_weights[1]

        for i in range(network_size):
            #lenghts = self.network.shortest_paths(i)[0]
            lenghts = self.extra_network.shortest_paths(i)[0]
            lenghts2 = self.network.shortest_paths(i, weights=weight)[0]
            #lenghts3 = self.network.shortest_paths(i, weights=weight2)[0]
            sp = average(lenghts)
            sp2= average(lenghts2)
            #sp3 = average(lenghts3)
            measure.append(sp)
            measure2.append(sp2)
            #measure3.append(sp3)
        ranked_by_sp = sortList(measure)
        ranked_by_sp_w = sortList(measure2)
        #ranked_by_sp_w2 = sortList(measure3)
        print ranked_by_sp
        print ranked_by_sp_w
        #save_vector_to_file(ranked_by_sp_w)
        #print ranked_by_sp_w2
        self.node_rankings['sp'] = ranked_by_sp
        self.node_rankings['sp_w'] = ranked_by_sp_w

        self.node_values['sp'] = measure
        self.node_values['sp_w'] = measure2
Example 3
 def groundtruthToResultTensor(self, type: str) -> torch.Tensor:
     """
     :param type: "vdd" or "gnd"
     :return: <Tensor n>
     """
     if type == "vdd":
         result = []
         for i in range(self.vddPlaneSize):
             result.append([])
         for nodename in self.nodenameDict:
             idx = self.nodenameDict[nodename]
             if not self.connectToVSource[idx] and self.nodeLabel[
                     idx] == L_VDD:
                 result[self.vddMapping[idx]].append(
                     self.groundTruth[nodename])
         result = list(map(lambda arr: average(arr), result))
         return torch.tensor(result,
                             dtype=torch.float32,
                             device=global_device)
     elif type == "gnd":
          result = [[] for _ in range(self.gndPlaneSize)]  # independent lists ([[ ]] * n would alias a single list)
         for nodename in self.nodenameDict:
             idx = self.nodenameDict[nodename]
             if not self.connectToVSource[idx] and self.nodeLabel[
                     idx] == L_GND:
                 result[self.gndMapping[idx]].append(
                     self.groundTruth[nodename])
         result = list(map(lambda arr: average(arr), result))
         return torch.tensor(result,
                             dtype=torch.float32,
                             device=global_device)
Example 4
def results_metrics():
    """
    Aggregator for all student results data
    :return: dictionary for all student data
    """
    quizzes = Quiz.objects.all()
    metrics = {}
    for quiz in quizzes:
        results = Results.objects.filter(quiz=quiz)
        quiz_json = json.loads(quiz.quizjson)
        scores = []
        single_metrics = {}
        for result in results:
            print result
            scores.append(result.score)
        print scores
        single_metrics['scores'] = scores
        single_metrics['name'] = quiz.name
        single_metrics['num_of_questions'] = len(quiz_json['questions'])
        mean = utils.average(scores)
        single_metrics['class_av'] = mean
        single_metrics['std_dev'] = utils.std_deviation(scores, mean)
        single_metrics['subject'] = quiz.subject
        single_metrics['high'] = max(scores)
        single_metrics['low'] = min(scores)
        single_metrics['class_median'] = utils.median(scores)
        metrics[quiz.name] = single_metrics
    return metrics
Example 5
 def update_weights(self, pose_meas, uwb_meas):
     poses = []
     covs = []
     for i, pose in enumerate(pose_meas):
         if pose is None:
             poses.append(None)
             covs.append(None)
         else:
             mcpf = self.mcpfs[self.connections[i]]
             pose, cov = utils.average(mcpf.transform(pose), mcpf.weights)
             poses.append(pose)
             covs.append(cov)
     for (car_t, car_s), uwb in uwb_meas.items():
         if uwb.distance > 0:
             ti = self.id_to_index[car_t]
             si = self.id_to_index[car_s]
             local_pose = poses[si]
             rel_pose = pose_meas[ti]
             if local_pose is None or rel_pose is None:
                 continue
             local_pose_cov = covs[si] + self.pose_cov
             rel_pose_cov = self.pose_cov
             self.mcpfs[car_t].update_weights(
                 # to cheat, uncomment to give correct transformed pose (correct when transformation is identity)
                 # pose_meas[si], local_pose_cov,
                 local_pose,
                 local_pose_cov,
                 uwb.distance,
                 self.uwb_var,
                 rel_pose,
                 rel_pose_cov)
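Here utils.average(mcpf.transform(pose), mcpf.weights) is expected to return both a weighted mean pose and its covariance. A rough NumPy sketch of that behaviour, assuming particles arrive as an (N, d) array and weights as a length-N vector; the real helper (e.g. any angle wrapping) may differ:

import numpy as np

def weighted_mean_and_cov(particles, weights):
    # hypothetical helper: weighted mean and covariance of an (N, d) particle set
    particles = np.asarray(particles, dtype=float)
    weights = np.asarray(weights, dtype=float)
    weights = weights / weights.sum()                   # normalize to sum to 1
    mean = np.average(particles, axis=0, weights=weights)
    centered = particles - mean
    cov = np.dot(centered.T, centered * weights[:, None])
    return mean, cov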
Example 6
 def print_best(self, gav, iter_num):
     print("Iteration number " + str(iter_num))
     with open("output.txt", 'a') as file:
         file.write("Iteration number: " + str(iter_num) + "\n")
         file.write("    Best fitness: " + str(gav[0].fitness) + "\n")
         overall_value = 0
         overall_weight = 0
         for i in range(self.items):
             if gav[0].knapsack[i] == 1:
                 overall_value = overall_value + self.prices[i]
                 overall_weight = overall_weight + self.weights[i]
                 print("Item " + str(i) + " with value " +
                       str(self.prices[i]) + " and weight " +
                       str(self.weights[i]))
                 file.write("    Item " + str(i) + " with value " +
                            str(self.prices[i]) + " and weight " +
                            str(self.weights[i]) + "\n")
         print("Overall value is " + str(overall_value) + "/" +
               str(sum(self.prices)))
         print("Overall weight is " + str(overall_weight) + "/" +
               str(self.capacity))
         print()
         file.write("    Overall value is " + str(overall_value) + "\n")
         file.write("    Overall weight is " + str(overall_weight) + "\n")
         file.write(
             "    Fitness average: " +
             str(round(utils.average(gav, self.data.ga_popsize), 3)) + "\n")
         file.write(
             "    Fitness deviation: " +
             str(round(utils.deviation(gav, self.data.ga_popsize), 3)) +
             "\n")
Example 7
 def inference(self):
     self.net.eval()  # set the model to evaluation mode
     test_loss = average()
     correct = 0
     total = 0
     with torch.no_grad():
         for batch_idx, (inputs, labels) in enumerate(self.v_loader):
             inputs, labels = inputs.to(self.device), labels.to(
                 self.device)  # upload data to device
             outputs = self.net(inputs)  # inference
             loss = self.loss_fn(outputs, labels)
             test_loss.add(loss.item())
             _, predicted = outputs.max(1)
             total += labels.size(0)
             correct += predicted.eq(labels).sum().item()
     acc = 100. * correct / total
     self.writer.add_scalar('test_acc', acc, global_step=self.epoch - 1)
     self.results.result['test_acc'].append(acc)
     if acc > self.state['acc']:
         self.state['acc'] = acc
         self.state['net'] = self.net.state_dict()
         self.state['epoch'] = self.epoch
         if not os.path.isdir('./checkpoint'):
             os.mkdir('./checkpoint')
         torch.save(self.state, './checkpoint/%s.t7' % self.log)
     return acc
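Note that average() is used here (and again in the training method of Example 16) as a running-loss accumulator object rather than a plain function. A minimal sketch of a class with the add / clear / value interface these two snippets assume; this is a guess at the project's helper, not its actual code:

class average(object):
    # hypothetical running-mean accumulator matching the add / clear / value usage
    def __init__(self):
        self.clear()

    def clear(self):
        self._total = 0.0
        self._count = 0

    def add(self, value):
        # accept plain floats as well as 0-dim tensors exposing .item()
        self._total += float(value.item() if hasattr(value, "item") else value)
        self._count += 1

    @property
    def value(self):
        return self._total / self._count if self._count else 0.0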
Example 8
    def soil_sensor_check(self, n_samples=10, rate=0.5):
        try:
            samples = self.read_samples(n_samples, rate)
            sampled_adc = average(samples)
            self._soilmoistperc = adc_map(
                sampled_adc,
                self.config["moisture_sensor_cal"]["dry"],
                self.config["moisture_sensor_cal"]["wet"],
            )
            if self._soilmoistperc <= 100:
                print("[DEBUG] Current Soil moisture: %s%%" %
                      self._soilmoistperc)
                self.ubidots.post_request(
                    {"soil_moisture": self._soilmoistperc})

            if self._soilmoistperc <= self.config["moisture_sensor_cal"].get(
                    "Threshold", 50):
                self._water_me = True
                self.message_send(
                    "[INFO] Soil Moisture Sensor: %.2f%% \t %s" %
                    (self._soilmoistperc, current_time()),
                    True,
                )
            else:
                self._water_me = False
        except Exception as exc:
            print("Exception: %s", exc)
        finally:
            force_garbage_collect()
Example 9
 def print_best(self, gav, iter_num):
     print("Best: " + gav[0].str + " (" + str(gav[0].fitness) + ")")
     with open("output.txt", 'a') as file:
         file.write("Best: " + gav[0].str + " (" + str(gav[0].fitness) + ")\n")
         file.write("    Iteration number: " + str(iter_num) + "\n")
         file.write("    Fitness average: " + str(round(utils.average(gav, self.data.ga_popsize), 3)) + "\n")
         file.write("    Fitness deviation: " + str(round(utils.deviation(gav, self.data.ga_popsize), 3)) + "\n")
Example 10
 def setAvgVolume(self):
     volumes = self.get('volumes')
     new_vols = utils.map(volumes, self.__zeroNonesVol)
     self.set('volumes', new_vols)
     avg = utils.average(new_vols)
     self.set('avg_volume', avg)
     return avg
Example 11
    def __str__(self):

        s = '%s(busy=%s, <time>=%.2f, jobs=%d, host="%s", proxy="%s")' \
             % (self.__class__.__name__, self.acquired, average(self.time), self.jobs, \
                self.host.name, self.proxy)

        return s
Example 12
 def eval(self, x, y):
     w = 0.5 * (self.w.eval(x, y)[0] + 1.0)
     c1 = self.e1.eval(x, y)
     c2 = self.e2.eval(x, y)
     return average(
         c1,
         c2,
     )
Example 13
def print_statistics(data, label):
    avg = utils.average(data)
    var = utils.variance(data)
    minp = min(data)
    q1st, median, q3rd = utils.quartiles(data)
    maxp = max(data)

    print("%s: avg=%.3f, var=%.3f, min=%.3f, 1stq=%.3f, median=%.3f, 3rdq=%.3f, max=%.3f"
          % (label, avg, var, minp, q1st, median, q3rd, maxp))
Example 14
 def run(self, left, right, match):
     lng = utils.average(self.parent.df.iloc[left].longitude,
                         self.parent.df.iloc[right].longitude)
     lat = utils.average(self.parent.df.iloc[left].latitude,
                         self.parent.df.iloc[right].latitude)
     places = self.google_places.nearby_search(lat_lng={
         'lat': lat,
         'lng': lng
     },
                                               radius=self.raduis).places
     left_best_match = best_places_match(
         places, self.parent.df.iloc[left].standardized_name)
     right_best_match = best_places_match(
         places, self.parent.df.iloc[right].standardized_name)
     print(left_best_match, right_best_match)
     if (left_best_match == right_best_match):
         return labeler.LABEL.MATCH
     else:
         return labeler.LABEL.NOTMATCH
Example 15
        def fill_block_sizes(bytes, max_bytes_per_block):
            if bytes <= max_bytes_per_block:
                return [bytes]
            else:
                block_sizes = [max_bytes_per_block] * (bytes // max_bytes_per_block)

                remainder = bytes % max_bytes_per_block
                if remainder != 0:
                    last_two_avg = utils.average([remainder, max_bytes_per_block])
                    block_sizes[-1] = int(math.ceil(last_two_avg))
                    block_sizes.append(int(math.floor(last_two_avg)))

            return block_sizes
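Assuming utils.average is a plain arithmetic mean, fill_block_sizes(10, 4) would return [4, 3, 3] rather than the more lopsided [4, 4, 2]: the full blocks are [4, 4], the remainder is 2, and average([2, 4]) is 3.0, so the last two blocks become ceil(3.0) = 3 and floor(3.0) = 3.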
Example 16
    def training(self):
        train_loss = average()
        train_loss.clear()
        self.net.train()  # set the model to train mode
        correct = 0
        total = 0
        self.lr_decay()
        for index, (inputs, labels) in enumerate(self.t_loader, 0):
            inputs, labels = inputs.to(self.device), labels.to(self.device)
            self.optim.zero_grad()

            if self.mixup:
                inputs, labels_a, labels_b, lam = mixup_data(
                    inputs, labels, 1, True)
                inputs, labels_a, labels_b = map(Variable,
                                                 (inputs, labels_a, labels_b))
                outputs = self.net(inputs)
                loss = mixup_criterion(self.loss_fn, outputs, labels_a,
                                       labels_b, lam)
            else:
                outputs = self.net(inputs)
                loss = self.loss_fn(outputs, labels)
            loss.backward()
            # print(loss.item())
            torch.nn.utils.clip_grad_value_(self.net.parameters(),
                                            clip_value=2.)
            self.optim.step()
            train_loss.add(loss)
            _, predicted = outputs.max(1)
            total += labels.size(0)
            correct += predicted.eq(labels).sum().item()
        self.writer.add_scalar('train_loss',
                               train_loss.value,
                               global_step=self.epoch)
        self.writer.add_scalar('train_acc',
                               float(correct) / float(total),
                               global_step=self.epoch)
        self.writer.add_scalar('lr', self.lr, global_step=self.epoch)

        self.results.result['train_loss'].append(train_loss.value)
        self.results.result['train_acc'].append(float(correct) / float(total))
        self.results.result['epoch'].append(self.epoch)

        self.epoch += 1
        return train_loss.value
Example 17
    def build_binary_tree(self, indice_0, indice_1, depth):
        self.depth = depth

        if len(indice_0) + len(indice_1) <= self.n0:
            # print(self.depth)
            if self.task == "classfication":
                value = vote_for_one(self.y[indice_0 + indice_1])
            else:
                value = average(self.y[indice_0 + indice_1])
            return TreeNode(value, None, None, None, None, depth)

        idx_0, idx_1 = self.pick_two_samples(indice_0, indice_1)
        left_indice_0, left_indice_1, right_indice_0, right_indice_1 = self.separate_samples(
            indice_0, indice_1, idx_0, idx_1)
        left_child = self.build_binary_tree(left_indice_0, left_indice_1,
                                            depth + 1)
        right_child = self.build_binary_tree(right_indice_0, right_indice_1,
                                             depth + 1)
        return TreeNode(None, idx_0, idx_1, left_child, right_child, depth)
Example 18
 def __init__(self, data, distance_constructor, stretch=True, *args, **kwargs):
     dm = distance_constructor(data)
     sm = lambda *args, **kwargs: 1 - dm(*args, **kwargs)
     get_density = lambda ex: average((sm(ex, o) for o in data))
     self.density_lookup = dict(((ex, get_density(ex)) for ex in data))
     if stretch:
         min_density = min(self.density_lookup.itervalues())
         max_density = max(self.density_lookup.itervalues())
         assert(0 <= min_density <= 1)
         assert(0 <= max_density <= 1)
         
         max_density_after = max_density - min_density
         
         scale_prod = 1.0/max_density_after
         
         for k in self.density_lookup.iterkeys():
             stretched_d = (self.density_lookup[k] - min_density) * scale_prod
             assert(0 <= stretched_d <= 1)
             self.density_lookup[k] = stretched_d
Example 19
def get_vcpu_load_avgs(instance):
    cpu_files = utils.get_whisper_files_by_metric(
                "cpu",
                utils.get_whisper_files_by_instance_id(instance.id))

    if not cpu_files:
        return "N/A"

    # TODO: what if we have more than one CPU database??
    f = cpu_files[0]
    avgs = []

    for duration in [1, 5, 15]:
        fetched = whisper.fetch(f, time.time() - 60*duration)
        times, data = fetched
        load_avg = utils.average(data)
        avgs.append(load_avg)

    return "%f | %f | %f" % tuple(avgs)
Example 20
 def extraFea(self, x):
     output = []
     input = x
     for x in input:
         result = []
         x = abs(x[3].cpu().numpy())
         result.append(utils.mean_abs_dev(x))
         result.append(utils.average(x))
         result.append(utils.fre_skewness(x))
         result.append(utils.fre_kurtosis(x))
         result.append(utils.energy(x))
         result.append(utils.entropy(x))
         #temp = utils.ar_coef(x)
         #for i in temp:
         #    result.append(i)
         x1 = x[0:int(len(x) / 2)]
         x2 = x[int(len(x) / 2):]
         result.append(utils.correlation(x1, x2))
         result.append(utils.fswa(x))
         output.append([result])
     return output
Example 21
 def __init__(self, all_results=None):
     all_results = list(all_results or [])
     self.all_results = all_results
     
     aligned_results = zip(*all_results)
     
     ResultSet.__init__(self)
     
     if not all((all((result is not None and result.case_base_size == aligned_result[0].case_base_size
                      for result
                      in aligned_result))
                 for aligned_result
                 in aligned_results)):
         raise Exception("case_base_sizes don't seem aligned")
     
     averaged_result_instances = (Result(aligned_result[0].case_base_size, 
                                         average((result.classification_accuracy 
                                                  for result in aligned_result))) 
                                  for aligned_result 
                                  in aligned_results)
     
     self.extend(averaged_result_instances)
Example 22
def centroidemd(source_path):
    get_embds = lambda path: [[float(y) for y in x.split(" ")]
                              for x in utils.fileaslist(path[:-3] + "emd")]
    source_embds = get_embds(source_path)
    if source_path not in cache:
        cache[source_path] = utils.average(source_embds)

    source_sens = utils.fileaslist(source_path)
    centroid = cache[source_path]

    assert len(source_sens) == len(source_embds)

    best = set()
    for j in range(FIRST_N_LINES):
        try:
            best.add(
                max(set(range(len(source_embds))) - best,
                    key=lambda i: utils.cosine_similarity(
                        source_embds[i], centroid)))
        except ValueError:
            print "too small text"

    return "\n".join([source_sens[i] for i in best])
Example 23
def anicam(parse, filelst, infl, supl):
    points = []
    gpoints = []
    for path in filelst:
        offset = None
        tmp = []
        data = parse(path)
        for t, v in data:
            tmp.append((t,v))
            if t >= infl and t <= supl:
                points.append(v)
            if offset is None and v == 0 and t >= supl:
                offset = 40 - t

        if offset is None: raise ValueError("no zero value found after supl")
        for t,v in tmp:
            gpoints.append(((t + offset), v))

    avg = utils.average(points)
    var = utils.variance(points)
    q1, median, q3 = utils.quartiles(points)
    
    return gpoints, (avg, var, min(points), q1, median, q3, max(points))
Example 24
def reducer(lines, ABS):
#    print '[0, 3000), [3000, 4000), [4000, 5000), [5000, 6000), [6000,sys.maxint) '
    rates = {}
    for line in lines:
        history_ag = History.get_from_historyline(line)
        if history_ag.average < 3000:
            grade = '[0, 3000)'
        elif history_ag.average < 4000:
            grade = '[3000, 4000)'
        elif history_ag.average < 5000:
            grade = '[4000, 5000)'
        elif history_ag.average < 6000:
            grade = '[5000, 6000)'
        else:
            grade = '[6000,sys.maxint)'
        rates.setdefault(grade, [])
        rates[grade].append(history_ag.increase_rate)

    if ABS:
        for grade in rates:
            print '%s\t%s' % (grade, utils.abs_average(rates[grade]))
    else:
        for grade in rates:
            print '%s\t%s' % (grade, utils.average(rates[grade]))
Example 25
    ("cape_town_preci.csv.gz", utils.yearly, "k", "Precipitation", "Yearly",
     "Rainfall"),
    "LEGEND",
    "FIGURE",
    ("cape_town_min_t.csv.gz", utils.yearly, "b", "Min temp", "Yearly",
     "Temp"),
    ("cape_town_max_t.csv.gz", utils.yearly, "r", "Max temp", "Yearly",
     "Temp"),
    "LEGEND",
]

for average_data in [False, True]:
    for f in figures:
        if f == "FIGURE":
            name = ""
            fig, ax = plt.subplots()
            continue
        if f == "LEGEND":
            ax.legend()
            if average_data:
                name += "Averaged"
            plt.savefig("./png/" + name + '.png', bbox_inches='tight')
            continue
        (filename, func, col, label, xlabel, ylabel) = f
        name += label + xlabel
        (xs, ys) = utils.get_data(func, filename)
        print(filename, len(xs))
        if average_data:
            (xs, ys, counts) = utils.average(xs, ys)
        utils.beautiful_plot(xs, ys, ax, col, label, xlabel, ylabel)
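utils.average(xs, ys) here (and again in Example 30) returns three sequences, which suggests it collapses repeated x values into a single averaged y per x, together with the number of samples behind each point. A sketch under that assumption; the real helper may work differently:

from collections import OrderedDict

def average(xs, ys):
    # hypothetical: average all y values that share the same x, keeping counts
    groups = OrderedDict()
    for x, y in zip(xs, ys):
        groups.setdefault(x, []).append(y)
    out_xs = list(groups.keys())
    out_ys = [sum(v) / float(len(v)) for v in groups.values()]
    counts = [len(v) for v in groups.values()]
    return out_xs, out_ys, counts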
Example 26
 def get_velocity(self, encoder_side: EncoderSide):
     encoders_querying = self.get_encoders_side(encoder_side)
     return average([encoder.getRate() for encoder in encoders_querying])
Example 27
 def get_position(self, encoder_side: EncoderSide):
     encoders_querying = self.get_encoders_side(encoder_side)
     return average(
         [encoder.getDistance() for encoder in encoders_querying])
 
Example 28
 with open(output_f, 'wb') as output_s:
     writer = csv.writer(output_s)
     writer.writerow([""] + dsn_to_results_dict.keys() 
                     + [options.avg_rank_col_name] if options.add_avg_rank_col else [] )
     strats_results = zip(*dsn_to_results_dict.values())
     names_to_strats_results = zip(strats, strats_results)
     for (strat_name, strat_results) in names_to_strats_results:
         if options.include_ranks:
             formatter = lambda res: "%.03f (%d)" % (res.score, res.rank) 
         else:
             formatter = lambda res: "%.03f" % res.score
             
         if options.highlight_best:
             old_f = formatter
             formatter = lambda s: r"\textbf{%s}" % old_f(s)
         
         if options.abbreviatepast > 0 and len(strat_name) > options.abbreviatepast:
             strat_name = abbreviate(strat_name,"+")
             
         
         row = [strat_name] + map(formatter, strat_results)
         if options.add_avg_rank_col:
             row += ["%.02f" % average((r.rank for r in strat_results))]
         
         writer.writerow(row)
     
         
 
 
 
Example 29
    def iter_bitstream(self, iter_duration_generator):
        """
        iterate over self.iter_trigger() and
        yield the bits
        """
        assert self.half_sinus == False  # Always trigger on the full sinus cycle

        # build min/max Hz values
        bit_nul_min_hz = self.cfg.BIT_NUL_HZ - self.cfg.HZ_VARIATION
        bit_nul_max_hz = self.cfg.BIT_NUL_HZ + self.cfg.HZ_VARIATION

        bit_one_min_hz = self.cfg.BIT_ONE_HZ - self.cfg.HZ_VARIATION
        bit_one_max_hz = self.cfg.BIT_ONE_HZ + self.cfg.HZ_VARIATION

        bit_nul_max_duration = self._hz2duration(bit_nul_min_hz)
        bit_nul_min_duration = self._hz2duration(bit_nul_max_hz)

        bit_one_max_duration = self._hz2duration(bit_one_min_hz)
        bit_one_min_duration = self._hz2duration(bit_one_max_hz)

        log.info("bit-0 in %sHz - %sHz (duration: %s-%s)  |  bit-1 in %sHz - %sHz (duration: %s-%s)" % (
            bit_nul_min_hz, bit_nul_max_hz, bit_nul_min_duration, bit_nul_max_duration,
            bit_one_min_hz, bit_one_max_hz, bit_one_min_duration, bit_one_max_duration,
        ))
        assert bit_nul_max_hz < bit_one_min_hz, "HZ_VARIATION value is %sHz too high!" % (
            ((bit_nul_max_hz - bit_one_min_hz) / 2) + 1
        )
        assert bit_one_max_duration < bit_nul_min_duration, "HZ_VARIATION value is too high!"

        # for end statistics
        bit_one_count = 0
        one_hz_min = sys.maxint
        one_hz_avg = None
        one_hz_max = 0
        bit_nul_count = 0
        nul_hz_min = sys.maxint
        nul_hz_avg = None
        nul_hz_max = 0

        for duration in iter_duration_generator:

            if bit_one_min_duration < duration < bit_one_max_duration:
                hz = self._duration2hz(duration)
                log.log(5,
                    "bit 1 at %s in %sSamples = %sHz" % (
                        self.pformat_pos(), duration, hz
                    )
                )
                yield 1
                bit_one_count += 1
                if hz < one_hz_min:
                    one_hz_min = hz
                if hz > one_hz_max:
                    one_hz_max = hz
                one_hz_avg = average(one_hz_avg, hz, bit_one_count)
            elif bit_nul_min_duration < duration < bit_nul_max_duration:
                hz = self._duration2hz(duration)
                log.log(5,
                    "bit 0 at %s in %sSamples = %sHz" % (
                        self.pformat_pos(), duration, hz
                    )
                )
                yield 0
                bit_nul_count += 1
                if hz < nul_hz_min:
                    nul_hz_min = hz
                if hz > nul_hz_max:
                    nul_hz_max = hz
                nul_hz_avg = average(nul_hz_avg, hz, bit_nul_count)
            else:
                hz = self._duration2hz(duration)
                log.log(7,
                    "Skip signal at %s with %sHz (%sSamples) out of frequency range." % (
                        self.pformat_pos(), hz, duration
                    )
                )
                continue

        bit_count = bit_one_count + bit_nul_count

        if bit_count == 0:
            print "ERROR: No information from wave to generate the bits"
            print "trigger volume to high?"
            sys.exit(-1)

        log.info("\n%i Bits: %i positive bits and %i negative bits" % (
            bit_count, bit_one_count, bit_nul_count
        ))
        if bit_one_count > 0:
            log.info("Bit 1: %sHz - %sHz avg: %.1fHz variation: %sHz" % (
                one_hz_min, one_hz_max, one_hz_avg, one_hz_max - one_hz_min
            ))
        if bit_nul_count > 0:
            log.info("Bit 0: %sHz - %sHz avg: %.1fHz variation: %sHz" % (
                nul_hz_min, nul_hz_max, nul_hz_avg, nul_hz_max - nul_hz_min
            ))
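The average(old_avg, hz, count) calls above maintain a running mean without storing every sample, seeded with None before the first measurement. A minimal sketch consistent with that call pattern (an assumption, not the original helper):

def average(old_avg, value, count):
    # incremental mean; count already includes the newest value
    if old_avg is None:
        return float(value)
    return old_avg + (value - old_avg) / float(count)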
Example 30
import numpy as np
import matplotlib.pyplot as plt

from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import RBF, WhiteKernel

import utils

files = [("cape_town_min_t.csv.gz", "Min Temp", 'b-'),
         ("cape_town_max_t.csv.gz", "Max Temp", 'r-'),
         #  ("cape_town_preci.csv.gz", "Precipitation"),
         ]
funcs = [(utils.daily, "Daily"), (utils.yearly, "Yearly")]

for (func, xlabel) in funcs:
    fig = plt.figure()
    for (f, ylabel, col) in files:
        (X, Y) = utils.get_data(func, f)
        (X, Y, _) = utils.average(X, Y)

        # Mesh the input space for evaluations of the real function, the
        # prediction and its MSE
        x = np.atleast_2d(np.linspace(min(X), max(X), 1000)).T

        X = np.reshape(np.array(X), (-1, 1))
        Y = np.reshape(np.array(Y), (-1, 1))

        # Instantiate a Gaussian Process model
        kernel = RBF(length_scale=1.0, length_scale_bounds=(1e-1, 1e3)) \
            + WhiteKernel(noise_level=1e-1, noise_level_bounds=(1e-2, 1e+2))
        gp = GaussianProcessRegressor(kernel=kernel, n_restarts_optimizer=9)

        # Fit to data using Maximum Likelihood Estimation of the parameters
        gp.fit(X, Y)
Example 31
    ?s tb:juniper-infusion ?inf.
  }
}'''

accounts = {}
for (s, inf, herb, districtn) in sparqllib.query_for_rows(query):
    inf = (inf == 'true')
    already_juniper = accounts.get(s, (None, False, None))[1]
    accounts[s] = (inf, already_juniper or herb == JUNIPER, districtn)

accounts = [(districtn, (0.75 + (0.25 if inf else 0)) if juniper else 0)
            for (inf, juniper, districtn) in accounts.values()]

district_index = utils.index(accounts)

from pprint import pprint
# pprint(district_index)

district_to_value = {
    name: utils.average(points)
    for (name, points) in district_index.items()
}
for (district_name) in sparqllib.query_for_list(district_query):
    if district_name not in district_to_value:
        district_to_value[district_name] = None  # means: no data

pprint(district_to_value)

themap = config.make_map_from_cli_args(map_type='choropleth')
themap.render_to('juniper-choropleth-map', district_to_value)

Example 32
callbacks = [
  # Interrupt training if `val_loss` stops improving for 50 epochs
  tf.keras.callbacks.EarlyStopping(patience=50, monitor='val_loss'),
  # Write TensorBoard logs to `./logs` directory
  tf.keras.callbacks.TensorBoard(log_dir='./my_inception_v3/20190201/logs')
]
# train the model on the new data for a few epochs
history = model.fit(train_generator, epochs=1, steps_per_epoch=10, 
  validation_data=validation_generator, validation_steps=1, 
  callbacks=callbacks)

print('max_val_acc: ',max(history.history['val_acc']))
print('min_val_acc: ',min(history.history['val_acc']))
print('average_val_acc: ',utils.average(history.history['val_acc']))
print('max_val_loss: ',max(history.history['val_loss']))
print('min_val_loss: ',min(history.history['val_loss']))
print('average_val_loss: ',utils.average(history.history['val_loss']))
print('train_acc: ',max(history.history['acc']))
print('train_loss: ',min(history.history['loss']))
print("train/val loss ratio: ", min(history.history['loss'])/min(history.history['val_loss']))

model.save('my_inception_v3/20190201/inception_v3-model-20190201.h5')
# at this point, the top layers are well trained and we can start fine-tuning
# convolutional layers from inception V3. We will freeze the bottom N layers
# and train the remaining top layers.

# let's visualize layer names and layer indices to see how many layers
# we should freeze:
# for i, layer in enumerate(base_model.layers):
Example 33
    vars = tf.trainable_variables(scope='discriminator')

    targets = tf.placeholder(dtype=tf.float32, shape=(None))
    loss = tf.losses.sigmoid_cross_entropy(targets, logits)
    optimizer = tf.train.AdamOptimizer(5e-4).minimize(loss, var_list=vars)


#%% adversarial training setup
Generator.adversarial_loss = tf.losses.sigmoid_cross_entropy(
    tf.ones_like(Discriminator.targets), Discriminator.logits)
Generator.mse_weight = tf.placeholder(tf.float32, shape=())
Generator.ssim_weight = tf.placeholder(tf.float32, shape=())
Generator.adversarial_weight = tf.placeholder(tf.float32, shape=())
Generator.loss = utils.average(
    [Generator.mse_loss, Generator.ssim_loss, Generator.adversarial_loss],
    weights=[
        Generator.mse_weight, Generator.ssim_weight,
        Generator.adversarial_weight
    ])
Generator.optimizer = tf.train.AdamOptimizer(5e-4).minimize(
    Generator.loss, var_list=Generator.vars)

#%% training/loading
session = tf.Session()
saver = tf.train.Saver()
save_location = 'checkpoints/hybrid.ckpt'

fakes_library = []  # used to prevent instability
demo_inputs = [skimage.io.imread('test/elon.jpg')]
demo_outputs = []

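In this snippet utils.average acts as a weighted mean of scalar loss tensors, with the weights themselves being placeholders. A rough TensorFlow 1.x sketch of that signature, assumed rather than taken from the project:

import tensorflow as tf

def average(values, weights=None):
    # hypothetical weighted mean of scalar tensors, matching average(values, weights=...)
    if weights is None:
        return tf.add_n(values) / float(len(values))
    weighted = tf.add_n([v * w for v, w in zip(values, weights)])
    return weighted / tf.add_n(weights)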
Example 34
 def test_average(self):
     self.assertEqual(average(1, 2, 3), 2)
     self.assertNotEqual(average(2, 2.4), 2)
Example 35
 def predicted_state(self):
     return utils.average(self.particles, self.weights)
Example 36
 def average_resource_spec_common(self, r, sts, func):
     return utils.average((func(r, t, sts) for t in self.T), len(self.T))
Example 37
import utils
numbers = [245,512,51]
maximum = utils.find_max(numbers)
minimum = utils.find_min(numbers)
ave = utils.average(numbers)
print(ave)
Example 38
 def eval(self, x, y):
     return average(self.e1.eval(x, y), self.e2.eval(x, y))
Example 39
# compile the model (should be done *after* setting layers to non-trainable)
model.compile(optimizer=tf.train.AdamOptimizer(),
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy', utils.top_3_accuracy])

callbacks = [
    # Interrupt training if `val_loss` stops improving for 50 epochs
    tf.keras.callbacks.EarlyStopping(patience=50, monitor='val_loss'),
    # Write TensorBoard logs to `./logs` directory
    tf.keras.callbacks.TensorBoard(log_dir='./my_basic_cnn/20190119/logs')
]
# train the model on the new data for a few epochs
history = model.fit(train_generator,
                    epochs=200,
                    steps_per_epoch=2000,
                    validation_data=validation_generator,
                    validation_steps=100,
                    callbacks=callbacks)

print('max_val_acc: ', max(history.history['val_acc']))
print('min_val_acc: ', min(history.history['val_acc']))
print('average_val_acc: ', utils.average(history.history['val_acc']))
print('max_val_loss: ', max(history.history['val_loss']))
print('min_val_loss: ', min(history.history['val_loss']))
print('average_val_loss: ', utils.average(history.history['val_loss']))
print('train_acc: ', max(history.history['acc']))
print('train_loss: ', min(history.history['loss']))
print("train/val loss ratio: ",
      min(history.history['loss']) / min(history.history['val_loss']))

model.save('my_basic_cnn/20190119/basic_cnn-model-20190119.h5')