Example #1
    def ddos_svm_rbf_data(self):
        """Prepare data for the ml_ddos_svm_rbf classifier.

        The features are log(totalSourceBytes), totalSourcePackets,
        and flow duration.

        :return: Tuple of data and labels as NumPy arrays.
        """
        self._logging.debug("Preparing data for SVM (RBF kernel) "
                            "DDoS attack classifier.")
        features = ["totalSourceBytes", "totalSourcePackets",
                    "startDateTime", "stopDateTime"]
        selected_data = self._return_features(self._raw_data, features)
        transformed_data = []
        for flow in selected_data:
            new_entry = []
            src_bytes = 0
            try:
                src_bytes = math.log(float(flow[0]))
            except ValueError:
                # The natural log could not be evaluated, so set it to
                # 0. This arises when the number of source bytes is 0.
                # If the number of source bytes as listed in the
                # dataset is not 0, then something is wrong with the
                # data.
                pass
            new_entry.append(src_bytes)
            new_entry.append(flow[1])  # copy in totalSourcePackets
            start_dt = datetime.strptime(flow[2], "%Y-%m-%dT%H:%M:%S")
            stop_dt = datetime.strptime(flow[3], "%Y-%m-%dT%H:%M:%S")
            duration = (stop_dt - start_dt).seconds
            new_entry.append(duration)
            transformed_data.append(new_entry)
        return (np_array.array(transformed_data).astype("float32"),
                np_array.array(self._raw_labels).astype("float32"))
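A toy run of the same transform on a single made-up flow record, showing why the ValueError guard above matters when totalSourceBytes is 0 (the record values are illustrative):

import math
from datetime import datetime

flow = ["0", "3", "2010-06-13T23:57:19", "2010-06-13T23:57:23"]
try:
    src_bytes = math.log(float(flow[0]))
except ValueError:
    src_bytes = 0  # log(0) is undefined, so fall back as the method does
duration = (datetime.strptime(flow[3], "%Y-%m-%dT%H:%M:%S")
            - datetime.strptime(flow[2], "%Y-%m-%dT%H:%M:%S")).seconds
print([src_bytes, flow[1], duration])  # [0, '3', 4]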
Example #2
    def split(self, axis=None):
        """Split the cuboid in half

        If axis is specified, the cuboid is restructure along the given axis, else the maximal axis is chosen.

        :param axis: axis along which to restructure (optional)
        :type axis: int
        :return: cuboid1, cuboid2
        :rtype: tuple(Cuboid, Cuboid)
        """
        if axis is not None:
            index = axis
        else:
            # determine the dimension with the largest extent
            index = numpy.argmax(abs(self.high_corner - self.low_corner))
        # determine value at splitting point
        split = (self.high_corner[index] + self.low_corner[index]) / 2
        low_corner1 = array(self.low_corner)
        low_corner2 = array(self.low_corner)
        low_corner2[index] = split
        high_corner1 = array(self.high_corner)
        high_corner2 = array(self.high_corner)
        high_corner1[index] = split
        return Cuboid(low_corner1,
                      high_corner1), Cuboid(low_corner2, high_corner2)
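A self-contained sketch of the splitting behaviour, combining this method with the constructor from Example #9 below; the class here is a minimal stand-in and the corner values are made up:

import numpy
from numpy import array

class Cuboid:
    # Minimal stand-in: constructor from Example #9 plus split() above.
    def __init__(self, low_corner, high_corner):
        if len(low_corner) != len(high_corner):
            raise ValueError('corners must have same dimension')
        self.low_corner = array(low_corner, float)
        self.high_corner = array(high_corner, float)

    def split(self, axis=None):
        if axis is not None:
            index = axis
        else:
            index = numpy.argmax(abs(self.high_corner - self.low_corner))
        split = (self.high_corner[index] + self.low_corner[index]) / 2
        low_corner2 = array(self.low_corner)
        low_corner2[index] = split
        high_corner1 = array(self.high_corner)
        high_corner1[index] = split
        return (Cuboid(array(self.low_corner), high_corner1),
                Cuboid(low_corner2, array(self.high_corner)))

# The longest extent is along axis 1 (length 4), so that axis is halved.
c1, c2 = Cuboid([0, 0], [2, 4]).split()
print(c1.high_corner, c2.low_corner)  # [2. 2.] [0. 2.]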
Example #3
def _create_J_without_numba(Ybus, V, ref, pvpq, pq, slack_weights, dist_slack):
    # create Jacobian with standard pypower implementation.
    dS_dVm, dS_dVa = dSbus_dV(Ybus, V)

    ## evaluate Jacobian

    if dist_slack:
        rows_pvpq = array(r_[ref, pvpq]).T
        cols_pvpq = r_[ref[1:], pvpq]
        J11 = dS_dVa[rows_pvpq, :][:, cols_pvpq].real
        J12 = dS_dVm[rows_pvpq, :][:, pq].real
    else:
        rows_pvpq = array([pvpq]).T
        cols_pvpq = pvpq
        J11 = dS_dVa[rows_pvpq, cols_pvpq].real
        J12 = dS_dVm[rows_pvpq, pq].real
    if len(pq) > 0 or dist_slack:
        J21 = dS_dVa[array([pq]).T, cols_pvpq].imag
        J22 = dS_dVm[array([pq]).T, pq].imag
        if dist_slack:
            J10 = sparse(slack_weights[rows_pvpq].reshape(-1, 1))
            J20 = sparse(zeros(shape=(len(pq), 1)))
            J = vstack([hstack([J10, J11, J12]),
                        hstack([J20, J21, J22])],
                       format="csr")
        else:
            J = vstack([hstack([J11, J12]), hstack([J21, J22])], format="csr")
    else:
        J = vstack([hstack([J11, J12])], format="csr")
    return J
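A toy assembly of the non-distributed-slack block layout above, assuming the aliases the snippet presumably imports (sparse = scipy.sparse.csr_matrix plus scipy.sparse hstack/vstack, as in pandapower); the bus partition and sensitivities are random stand-ins for dSbus_dV(Ybus, V):

import numpy as np
from numpy import array
from scipy.sparse import csr_matrix as sparse
from scipy.sparse import hstack, vstack

rng = np.random.default_rng(0)
n = 5                       # total buses; bus 0 plays the slack
pvpq = array([1, 2, 3, 4])  # non-slack buses (hypothetical partition)
pq = array([3, 4])          # load buses

# Stand-ins for the complex power flow sensitivities.
dS_dVa = sparse(rng.standard_normal((n, n)) + 1j * rng.standard_normal((n, n)))
dS_dVm = sparse(rng.standard_normal((n, n)) + 1j * rng.standard_normal((n, n)))

J11 = dS_dVa[array([pvpq]).T, pvpq].real  # dP/dVa
J12 = dS_dVm[array([pvpq]).T, pq].real    # dP/dVm
J21 = dS_dVa[array([pq]).T, pvpq].imag    # dQ/dVa
J22 = dS_dVm[array([pq]).T, pq].imag      # dQ/dVm
J = vstack([hstack([J11, J12]), hstack([J21, J22])], format="csr")
print(J.shape)  # (6, 6): square in len(pvpq) + len(pq)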
Example #4
    def ddos_random_forest_data(self):
        """Prepare data for the ml_ddos_random_forest classifier.

        The features are totalSourceBytes, totalSourcePackets,
        totalDestinationBytes, totalDestinationPackets and flow duration.

        :return: Tuple of data and labels as NumPy arrays.
        """
        self._logging.debug("Preparing data for Random Forest DDoS "
                            "attack classifier.")
        features = [
            "totalSourceBytes", "totalSourcePackets", "totalDestinationBytes",
            "totalDestinationPackets", "startDateTime", "stopDateTime"
        ]
        selected_data = self._return_features(self._raw_data, features)
        transformed_data = []
        for flow in selected_data:
            new_entry = flow[0:4]  # copy in the first 4 elements
            start_dt = datetime.strptime(flow[4], "%Y-%m-%dT%H:%M:%S")
            stop_dt = datetime.strptime(flow[5], "%Y-%m-%dT%H:%M:%S")
            duration = (stop_dt - start_dt).seconds
            new_entry.append(duration)
            transformed_data.append(new_entry)
        return (np_array.array(transformed_data).astype("float32"),
                np_array.array(self._raw_labels).astype("float32"))
Example #5
    def classify(self):
        """Classify DDoS flows using Linear Discriminant Analysis.

        The data passed through to the fit() method cannot be a string
        type.

        :return: Results of the classification.
        """
        all_results = []  # Results from all fold trials
        fold_num = 1
        for train, test in self._kfold:
            print("\tTraining LDA...")
            # NOTE: I have switched the training and testing set around.
            train_array = np_array.array(map(self._data.__getitem__,
                                             test)).astype(np_float)
            train_label_array = np_array.array(
                map(self._labels.__getitem__, test)).astype(np_float)
            self._classifier.fit(train_array, train_label_array)
            print("\tTesting classifier...")
            test_array = np_array.array(map(self._data.__getitem__,
                                            train)).astype(np_float)
            test_label_array = np_array.array(
                map(self._labels.__getitem__, train)).astype(np_float)
            test_size = len(train)  # Remember the switch of sets!
            pred = self._classifier.predict(test_array)
            mislabeled = (test_label_array != pred).sum()
            tp, tn, fp, fn = rc.calculate_tpn_fpn(test_label_array, pred)
            detection_rate = rc.detection_rate(tp, fn)
            false_pos_rate = rc.false_positive_rate(tn, fp)
            all_results.append([
                fold_num, tp, tn, fp, fn, detection_rate, false_pos_rate,
                mislabeled, test_size
            ])
            fold_num += 1
        return all_results
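The per-fold numbers reduce to standard confusion-matrix ratios. The rc helpers are not shown anywhere in these examples, so here is a plausible self-contained stand-in; the function names mirror the snippet, while the formulas are just the textbook definitions for a binary {0, 1} labelling:

import numpy as np

def calculate_tpn_fpn(labels, pred):
    tp = int(np.sum((labels == 1) & (pred == 1)))
    tn = int(np.sum((labels == 0) & (pred == 0)))
    fp = int(np.sum((labels == 0) & (pred == 1)))
    fn = int(np.sum((labels == 1) & (pred == 0)))
    return tp, tn, fp, fn

def detection_rate(tp, fn):
    return tp / float(tp + fn)   # a.k.a. recall / true positive rate

def false_positive_rate(tn, fp):
    return fp / float(fp + tn)

labels = np.array([1, 1, 0, 0, 1])
pred = np.array([1, 0, 0, 1, 1])
tp, tn, fp, fn = calculate_tpn_fpn(labels, pred)
print(detection_rate(tp, fn))       # 0.666...
print(false_positive_rate(tn, fp))  # 0.5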
Example #6
    def get_dataframe(self, attr, measure='mean', sum=False, cum=False):
        """
        :rtype NDFrame
        """
        values = []
        for name in self.names:
            market_periods_by_date = self.periods_by_market_and_date[name]
            values.append(
                [market_periods_by_date[date][attr] for date in self.dates])

        values = array(values)  # shape is names-dates-samples

        if cum:
            # Accumulate over the dates, the second axis.
            # shape is the same: names-dates-samples
            values = values.cumsum(axis=1)

        if sum:
            # Sum over the names, the first axis.
            # shape is dates-samples
            values = values.sum(axis=0)

        if measure == 'mean':
            values = values.mean(axis=-1)
        elif measure == 'std':
            values = values.std(axis=-1)
        elif measure == 'quantile':
            assert self.confidence_interval is not None
            low_percentile = (100 - self.confidence_interval) / 2.0
            high_percentile = 100 - low_percentile
            mean = values.mean(axis=-1)
            low = mean - nanpercentile(values, q=low_percentile, axis=-1)
            high = nanpercentile(values, q=high_percentile, axis=-1) - mean
            errors = []
            if sum:
                # Need a 2 x len(dates) array, for a Series.
                errors.append([low, high])
            else:
                # Need a len(names) x 2 x len(dates) array, for a DataFrame.
                for i in range(len(self.names)):
                    errors.append([low[i], high[i]])
            values = array(errors)
            return values
        # elif measure == 'direct':
        #     raise NotImplementedError()
        #     if len(values) == 1:
        #         values = values[0]
        #     else:
        #         raise NotImplementedError()
        #     return DataFrame(values, index=dates, columns=names)
        else:
            raise Exception("Measure '{}' not supported".format(measure))

        if sum:
            return Series(values, index=self.dates)
        else:
            return DataFrame(values.T, index=self.dates, columns=self.names)
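The axis reductions in get_dataframe() are easier to follow on a dummy names-dates-samples cube; the shapes below are made up:

import numpy as np

values = np.arange(24, dtype=float).reshape(2, 3, 4)  # names x dates x samples
print(values.cumsum(axis=1).shape)  # (2, 3, 4): running total over dates
print(values.sum(axis=0).shape)     # (3, 4): aggregated over names
print(values.mean(axis=-1).shape)   # (2, 3): sample axis collapsed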
Example #8
    def classify(self):
        """Classify DDoS flows using a Support Vector Machine.

        Note that SVM training does not scale well to large numbers
        of data points (the exact limit is not currently known), so
        the StratifiedKFold object is used to obtain an even smaller
        training set. Alternatively, switch the training and testing
        sets around. It's an ugly hack...
        
        The data passed through to the fit() method cannot be a string
        type.

        :return: Results of the classification.
        """
        classifier = svm.SVC(
            C=self._config["C"],
            kernel=self._config["kernel"],
            degree=self._config["degree"],
            gamma=self._config["gamma"],
            coef0=self._config["coef0"],
            shrinking=self._config["shrinking"],
            probability=self._config["probability"],
            tol=self._config["tol"],
            cache_size=self._config["cache_size"],
            class_weight=self._config["class_weight"],
            verbose=self._config["verbose"],
            max_iter=self._config["max_iter"],
            decision_function_shape=self._config["decision_function_shape"],
            random_state=self._config["random_state"])
        all_results = []  # Results from all fold trials
        fold_num = 1
        for train, test in self._kfold:
            print("\tTraining SVM...")
            # NOTE: I have switched the training and testing set around.
            train_array = np_array.array(map(self._data.__getitem__,
                                             test)).astype(np_float)
            train_label_array = np_array.array(
                map(self._labels.__getitem__, test)).astype(np_float)
            classifier.fit(train_array, train_label_array)
            print("\tTesting classifier...")
            test_array = np_array.array(map(self._data.__getitem__,
                                            train)).astype(np_float)
            test_label_array = np_array.array(
                map(self._labels.__getitem__, train)).astype(np_float)
            test_size = len(train)  # Remember the switch of sets!
            pred = classifier.predict(test_array)
            mislabeled = (test_label_array != pred).sum()
            tp, tn, fp, fn = rc.calculate_tpn_fpn(test_label_array, pred)
            detection_rate = rc.detection_rate(tp, fn)
            false_pos_rate = rc.false_positive_rate(tn, fp)
            all_results.append([
                fold_num, tp, tn, fp, fn, detection_rate, false_pos_rate,
                mislabeled, test_size
            ])
            fold_num += 1
        return all_results
Example #9
    def __init__(self, low_corner, high_corner):
        """Build a cuboid.

        :param low_corner: low corner
        :type low_corner: numpy.array
        :param high_corner: high corner
        :type high_corner: numpy.array
        """
        if len(low_corner) != len(high_corner):
            raise ValueError('corners must have same dimension')
        self.low_corner = array(low_corner, float)
        self.high_corner = array(high_corner, float)
Example #10
    def classify(self):
        """Classify DDoS flows using a Random Forest.

        The data passed through to the fit() method cannot be a string
        type.

        :return: Results of the classification.
        """
        classifier = RandomForestClassifier(
            n_estimators=self._config["n_estimators"],
            criterion=self._config["criterion"],
            max_depth=self._config["max_depth"],
            min_samples_split=self._config["min_samples_split"],
            min_samples_leaf=self._config["min_samples_leaf"],
            min_weight_fraction_leaf=self._config["min_weight_fraction_leaf"],
            max_features=self._config["max_features"],
            max_leaf_nodes=self._config["max_leaf_nodes"],
            bootstrap=self._config["bootstrap"],
            oob_score=self._config["oob_score"],
            n_jobs=self._config["n_jobs"],
            random_state=self._config["random_state"],
            verbose=self._config["verbose"],
            warm_start=self._config["warm_start"],
            class_weight=self._config["class_weight"])
        all_results = []  # Results from all fold trials
        fold_num = 1
        for train, test in self._kfold:
            print("\tTraining Random Forest...")
            # NOTE: I have switched the training and testing set around.
            train_array = np_array.array(map(self._data.__getitem__,
                                             test)).astype(np_float)
            train_label_array = np_array.array(
                map(self._labels.__getitem__, test)).astype(np_float)
            classifier.fit(train_array, train_label_array)
            print("\tTesting classifier...")
            test_array = np_array.array(map(self._data.__getitem__,
                                            train)).astype(np_float)
            test_label_array = np_array.array(
                map(self._labels.__getitem__, train)).astype(np_float)
            test_size = len(train)  # Remember the switch of sets!
            pred = classifier.predict(test_array)
            mislabeled = (test_label_array != pred).sum()
            tp, tn, fp, fn = rc.calculate_tpn_fpn(test_label_array, pred)
            detection_rate = rc.detection_rate(tp, fn)
            false_pos_rate = rc.false_positive_rate(tn, fp)
            all_results.append([
                fold_num, tp, tn, fp, fn, detection_rate, false_pos_rate,
                mislabeled, test_size
            ])
            fold_num += 1
        return all_results
Example #11
    def test_var_cond(self):
        assert_almost_equal(variance_condition(self.fa[:, 1]), 1.7638342073763937)
        assert_allclose(variance_condition(self.fa), array([2.23606798, 1.76383421, 5.19615242, 2.23606798]))

        assert_allclose(variance_condition(pd.DataFrame(self.fa)),
                                   array([2.23606798, 1.76383421, 5.19615242, 2.23606798]))

        assert_allclose(variance_condition(list(self.fa)),
                                   array([2.23606798, 1.76383421, 5.19615242, 2.23606798]))

        ff = np.array([np.array(self.f), np.array(self.f)])

        with pytest.raises(ValueError):
            variance_condition(ff)
Example #12
def kraus_to_choi(kraus_list):
    """
    Takes a list of Kraus operators and returns the Choi matrix for the channel
    represented by the Kraus operators in `kraus_list`
    """
    kraus_mat_list = list(map(lambda x: matrix(x.data.todense()), kraus_list))
    op_len = len(kraus_mat_list[0])
    op_rng = range(op_len)
    choi_blocks = array([[sum([op[:, c_ix] * array([op.H[r_ix, :]])
                               for op in kraus_mat_list])
                          for r_ix in op_rng]
                         for c_ix in op_rng])
    return Qobj(inpt=hstack(hstack(choi_blocks)),
                dims=[kraus_list[0].dims, kraus_list[0].dims])
Example #14
def _create_J_without_numba(Ybus, V, pvpq, pq):
    # create Jacobian with standard pypower implementation.
    dS_dVm, dS_dVa = dSbus_dV(Ybus, V)

    ## evaluate Jacobian
    J11 = dS_dVa[array([pvpq]).T, pvpq].real
    J12 = dS_dVm[array([pvpq]).T, pq].real
    if len(pq) > 0:
        J21 = dS_dVa[array([pq]).T, pvpq].imag
        J22 = dS_dVm[array([pq]).T, pq].imag
        J = vstack([hstack([J11, J12]), hstack([J21, J22])], format="csr")
    else:
        J = vstack([hstack([J11, J12])], format="csr")
    return J
Example #15
from numpy import array, dtype, zeros

def bincount(x, weights=None, minlength=None):
    if minlength is None:
        minlength = 0
    else:
        if not isinstance(minlength, int):
            raise TypeError("an integer is required")
        if minlength <= 0:
            raise ValueError("minlength must be positive")

    x = array(x)
    len_output = minlength
    if len(x) > 0:
        if x.min() < 0:
            raise ValueError("x must not be negative")
        len_output = max(len_output, x.max() + 1)

    if x.dtype.kind not in 'ui':
        raise ValueError("x must be integer")

    if weights is None:
        output = zeros(len_output, dtype=dtype('int'))
        for elem in x:
            output[elem] += 1
    else:
        if len(weights) != len(x):
            raise ValueError("x and weights arrays must have the same size")
        output = zeros(len_output, dtype=dtype('float'))
        for i in range(len(x)):
            output[x[i]] += weights[i]
    return output
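A quick sanity check of the pure-Python fallback above against NumPy's built-in, with arbitrary inputs:

import numpy as np

x = [0, 1, 1, 3]
w = [0.5, 1.0, 2.0, 4.0]
print(bincount(x))                # [1 2 0 1]
print(np.bincount(x))             # [1 2 0 1]
print(bincount(x, weights=w))     # [0.5 3.  0.  4. ]
print(np.bincount(x, weights=w))  # [0.5 3.  0.  4. ]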
Example #16
    def generate_word_clouds(self):

        bar = progressbar.ProgressBar(widgets=[
            ' [',
            progressbar.Timer(),
            '] ',
            progressbar.Bar(),
            ' (',
            progressbar.ETA(),
            ') ',
        ])

        clean_mask = np.array(Image.open('../../data/images/mask.jpg'))

        nasty_subset = self.train.loc[(self.train['toxic'] == 1)
                                      & (self.train['severe_toxic'] == 1) &
                                      (self.train['obscene'] == 1) &
                                      (self.train['threat'] == 1) &
                                      (self.train['identity_hate'] == 1) &
                                      (self.train['insult'] == 1)]
        nasty_text = [n.lower() for n in nasty_subset.comment_text.values]
        self.nasties = []
        for n in nasty_text:
            if n is not None and len(n) > 0:
                self.nasties.extend(n.split())

        wc = WordCloud(background_color="white",
                       max_words=2000,
                       mask=clean_mask,
                       stopwords=self.mystops)
        wc.generate(" ".join(self.nasties))
        plt.figure(figsize=(20, 10))
        plt.axis("off")
        plt.title('Most Frequent Nasty Words', fontsize=20)
        plt.imshow(wc.recolor(colormap='viridis', random_state=17), alpha=0.98)
        plt.savefig('../../results/figures/nasty_wordcloud.jpg')
        #plt.show()
        with open(self.nasty_path, 'wb') as nasty_out:
            pickle.dump(self.nasties, nasty_out, pickle.HIGHEST_PROTOCOL)

        for index in bar(range(len(self.categories))):
            cat = self.categories[index]
            #clean_mask=clean_mask[:,:,1]
            #wordcloud for clean comments
            subset = self.train.loc[self.train[cat] == 1]
            text = subset.comment_text.values

            wc = WordCloud(background_color="white",
                           max_words=2000,
                           mask=clean_mask,
                           stopwords=self.mystops)
            wc.generate(" ".join(text))
            plt.figure(figsize=(20, 10))
            plt.axis("off")
            plt.title('Most Frequent Words in ' + cat + ' Comments',
                      fontsize=20)
            plt.imshow(wc.recolor(colormap='viridis', random_state=17),
                       alpha=0.98)
            plt.savefig('../../results/figures/' + cat + '_wordcloud.jpg')
Example #17
  def default(self):
    'Base controller for application.'
    readings = self._read_logfile()
    self.readings_array = array(readings)[:, 0]

    self._print_distribution_stats()
    self._print_histogram('Full distribution histogram')
    self._print_zoom_histograms()
Example #18
    def convert_props(self, real_props=None, fake_props=None):
        new_props = []
        if real_props is not None:
            if len(real_props) > 0:
                assert isinstance(real_props[0], ThreatEnvironmentProperties)
                for real_prop in real_props:
                    assert isinstance(real_prop, ThreatEnvironmentProperties)
                    assert len(real_prop.theProperties) == len(
                        real_prop.theRationale)
                    new_attrs = []
                    for idx in range(0, len(real_prop.theProperties)):
                        attr_name = self.rev_attr_dict.get(idx)
                        attr_value = self.prop_dict[
                            real_prop.theProperties[idx]]
                        attr_rationale = real_prop.theRationale[idx]
                        new_attr = SecurityAttribute(attr_name, attr_value,
                                                     attr_rationale)
                        new_attrs.append(new_attr)
                    real_prop.theProperties = new_attrs
                    del real_prop.theRationale
                    new_props.append(real_prop)

            return new_props
        elif fake_props is not None:
            if len(fake_props) > 0:
                check_required_keys(fake_props[0],
                                    ThreatEnvironmentPropertiesModel.required)
                for fake_prop in fake_props:
                    check_required_keys(
                        fake_prop, ThreatEnvironmentPropertiesModel.required)
                    new_ndprops = array([0] * 8).astype(numpy.core.int32)
                    new_ratios = ['None'] * 8
                    for idx in range(0, len(fake_prop['theProperties'])):
                        new_attr = fake_prop['theProperties'][idx]
                        check_required_keys(new_attr,
                                            SecurityAttribute.required)
                        attr_id = self.attr_dict.get(new_attr['name'], -1)
                        if -1 < attr_id < len(self.attr_dict):
                            attr_value = self.rev_prop_dict[new_attr['value']]
                            attr_rationale = new_attr['rationale']
                            new_ndprops[attr_id] = attr_value
                            new_ratios[attr_id] = attr_rationale
                    fake_prop['theProperties'] = new_ndprops
                    fake_prop['theRationale'] = new_ratios
                    new_prop = ThreatEnvironmentProperties(
                        environmentName=fake_prop['theEnvironmentName'],
                        lhood=fake_prop['theLikelihood'],
                        assets=fake_prop['theAssets'],
                        attackers=fake_prop['theAttackers'],
                        pRationale=fake_prop['theRationale'],
                        syProperties=fake_prop['theProperties'])
                    new_props.append(new_prop)

            return new_props
        else:
            self.close()
            raise MissingParameterHTTPError(
                param_names=['real_props', 'fake_props'])
Example #19
def choi_to_kraus(q_oper):
    """
    Takes a Choi matrix and returns a list of Kraus operators.
    TODO: Create a new class structure for quantum channels, perhaps as a
    strict sub-class of Qobj.
    """
    vals, vecs = eig(q_oper.data.todense())
    vecs = [array(_) for _ in zip(*vecs)]
    return [Qobj(inpt=sqrt(val)*vec2mat(vec)) for val, vec in zip(vals, vecs)]
Example #20
def create_J_without_numba(Ybus, V, pvpq, pq):
    """
    Standard matpower-like implementation
    :param Ybus:
    :param V:
    :param pvpq:
    :param pq:
    :return:
    """
    dS_dVm, dS_dVa = dSbus_dV(Ybus, V)

    # evaluate Jacobian
    J11 = dS_dVa[array([pvpq]).T, pvpq].real
    J12 = dS_dVm[array([pvpq]).T, pq].real
    J21 = dS_dVa[array([pq]).T, pvpq].imag
    J22 = dS_dVm[array([pq]).T, pq].imag
    J = vstack([hstack([J11, J12]), hstack([J21, J22])], format="csr")
    return J
Example #21
    def classify(self):
        """Classify DDoS flows using K-Nearest Neighbours.

        The data passed through to the fit() method cannot be a string
        type.

        :return: Results of the classification.
        """
        classifier = KNeighborsClassifier(
            n_neighbors=self._config["n_neighbors"],
            weights=self._config["weights"],
            algorithm=self._config["algorithm"],
            leaf_size=self._config["leaf_size"],
            metric=self._config["metric"],
            p=self._config["p"],
            metric_params=self._config["metric_params"],
            n_jobs=self._config["n_jobs"])
        all_results = []  # Results from all fold trials
        fold_num = 1
        for train, test in self._kfold:
            print("\tTraining K-Nearest Neighbours...")
            # NOTE: I have switched the training and testing set around.
            train_array = np_array.array(map(self._data.__getitem__,
                                             test)).astype(np_float)
            train_label_array = np_array.array(
                map(self._labels.__getitem__, test)).astype(np_float)
            classifier.fit(train_array, train_label_array)
            print("\tTesting classifier...")
            test_array = np_array.array(map(self._data.__getitem__,
                                            train)).astype(np_float)
            test_label_array = np_array.array(
                map(self._labels.__getitem__, train)).astype(np_float)
            test_size = len(train)  # Remember the switch of sets!
            pred = classifier.predict(test_array)
            mislabeled = (test_label_array != pred).sum()
            tp, tn, fp, fn = rc.calculate_tpn_fpn(test_label_array, pred)
            detection_rate = rc.detection_rate(tp, fn)
            false_pos_rate = rc.false_positive_rate(tn, fp)
            all_results.append([
                fold_num, tp, tn, fp, fn, detection_rate, false_pos_rate,
                mislabeled, test_size
            ])
            fold_num += 1
        return all_results
Example #22
from numpy import append, array

def reverse_3(input, original):
    original = array(original)
    # Ideally we would have a recursive function that calculates four
    # indices based on the top left corner and then derives the rest,
    # but take the simple route instead.
    noise = input - original
    # Integer halves: the noise pattern is assumed to tile 2x2.
    smallsquare = noise[:noise.shape[0] // 2, :noise.shape[1] // 2]
    byrow = append(smallsquare, smallsquare, 0)
    bycolumnrow = append(byrow, byrow, 1)
    clean = input - bycolumnrow
    return clean
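A round-trip check for reverse_3(), assuming the noise really is one small square tiled 2x2 as the slicing implies; the arrays are made up:

import numpy as np

tile = np.arange(4).reshape(2, 2)
noise = np.tile(tile, (2, 2))  # 2x2-periodic noise, per the assumption above
original = np.ones((4, 4))
print(np.array_equal(reverse_3(original + noise, original), original))  # True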
Example #24
 def getMostSevereValue(self, minNInstants=1): # TODO use np.percentile
     from numpy.core.multiarray import array
     from numpy.core.fromnumeric import mean
     values = array(self.values.values())
     indices = range(len(values))
     if len(indices) >= minNInstants:
         values = sorted(values[indices], reverse = self.mostSevereIsMax) # inverted if most severe is max -> take the first values
         return mean(values[:minNInstants])
     else:
         return None
Example #25
    def classify(self):
        """Classify DDoS flows using a Support Vector Machine.

        Note that SVM training does not scale well to large numbers
        of data points (the exact limit is not currently known), so
        the StratifiedKFold object is used to obtain an even smaller
        training set. Alternatively, switch the training and testing
        sets around. It's an ugly hack...
        
        The data passed through to the fit() method cannot be a string
        type.

        :return: Results of the classification.
        """
        all_results = []  # Results from all fold trials
        fold_num = 1
        for train, test in self._kfold:
            print("\tTraining SVM...")
            # NOTE: I have switched the training and testing set around.
            train_array = np_array.array(map(self._data.__getitem__,
                                             test)).astype(np_float)
            train_label_array = np_array.array(
                map(self._labels.__getitem__, test)).astype(np_float)
            self._classifier.fit(train_array, train_label_array)
            print("\tTesting classifier...")
            test_array = np_array.array(map(self._data.__getitem__,
                                            train)).astype(np_float)
            test_label_array = np_array.array(
                map(self._labels.__getitem__, train)).astype(np_float)
            test_size = len(train)  # Remember the switch of sets!
            pred = self._classifier.predict(test_array)
            mislabeled = (test_label_array != pred).sum()
            tp, tn, fp, fn = rc.calculate_tpn_fpn(test_label_array, pred)
            detection_rate = rc.detection_rate(tp, fn)
            false_pos_rate = rc.false_positive_rate(tn, fp)
            all_results.append([
                fold_num, tp, tn, fp, fn, detection_rate, false_pos_rate,
                mislabeled, test_size
            ])
            fold_num += 1
        return all_results
Example #26
def choi_to_kraus(q_oper, tol=1e-9):
    """
    Takes a Choi matrix and returns a list of Kraus operators.
    TODO: Create a new class structure for quantum channels, perhaps as a
    strict sub-class of Qobj.
    """
    vals, vecs = eig(q_oper.data.todense())
    vecs = [array(_) for _ in zip(*vecs)]
    shape = [np.prod(q_oper.dims[0][i]) for i in range(2)][::-1]
    return [Qobj(inpt=sqrt(val)*vec2mat(vec, shape=shape),
            dims=q_oper.dims[0][::-1])
            for val, vec in zip(vals, vecs) if abs(val) >= tol]
Example #27
    def draw_pz(self, tfcn):
        """Draw pzmap"""
        self.f_pzmap.clf()
        # Make adaptive window size, with min [-10, 10] in range,
        # always at least 25% extra space outside poles/zeros
        tmp = list(self.zeros)+list(self.poles)+[8]
        val = 1.25*max(abs(array(tmp)))
        plt.figure(self.f_pzmap.number)
        control.matlab.pzmap(tfcn)
        plt.suptitle('Pole-Zero Diagram')

        plt.axis([-val, val, -val, val])
Example #28
def _dep_super(pe):
    """
    Returns the superoperator corresponding to qubit depolarization for a
    given parameter pe.

    TODO: if this is going into production (hopefully it isn't) then check
    CPTP, expand to arbitrary dimensional systems, etc.
    """
    return Qobj(dims=[[[2], [2]], [[2], [2]]],
                inpt=array([[1. - pe / 2., 0., 0., pe / 2.],
                            [0., 1. - pe, 0., 0.], [0., 0., 1. - pe, 0.],
                            [pe / 2., 0., 0., 1. - pe / 2.]]))
Example #29
def build_selection_matrix(results, teams):
    matrix = []

    for result in results:
        index_a, index_b = teams[result['teamHome']], teams[result['teamAway']]
        line = [0] * len(teams)
        line[index_a] = 1
        line[index_b] = -1

        matrix.append(line)

    return array(matrix)
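A toy call with hypothetical team and result records; each row of the output encodes one match, +1 for the home team and -1 for the away team:

from numpy import array

teams = {'Reds': 0, 'Blues': 1, 'Greens': 2}
results = [
    {'teamHome': 'Reds', 'teamAway': 'Blues'},
    {'teamHome': 'Blues', 'teamAway': 'Greens'},
]
print(build_selection_matrix(results, teams))
# [[ 1 -1  0]
#  [ 0  1 -1]]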
Example #31
def decrypt(result, division):
    mainmsg = ""
    mylist = []
    for i in result:
        mylist.append(ord(i) - 65)
    cipher = matrix(array(mylist).reshape(3, 4))
    cipher = division * 26 + cipher
    plain = cipher * inv(hkey)
    for i in range(3):
        for j in range(4):
            mainmsg += chr(round(plain.item(i, j)) + 65)
    return mainmsg
Example #33
def _generalized_kraus(q_oper, thresh=1e-10):
    # TODO: document!
    # TODO: use this to generalize to_kraus to the case where U != V.
    #       This is critical for non-CP maps, as appear in (for example)
    #       diamond norm differences between two CP maps.
    if q_oper.type != "super" or q_oper.superrep != "choi":
        raise ValueError(
            "Expected a Choi matrix, got a {} (superrep {}).".format(
                q_oper.type, q_oper.superrep))

    # Remember the shape of the underlying space,
    # as we'll need this to make Kraus operators later.
    dL, dR = map(int, map(sqrt, q_oper.shape))
    # Also remember the dims breakout.
    out_dims, in_dims = q_oper.dims
    out_left, out_right = out_dims
    in_left, in_right = in_dims

    # Find the SVD.
    U, S, V = svd(q_oper.data.todense())

    # Truncate away the zero singular values, up to a threshold.
    nonzero_idxs = S > thresh
    dK = nonzero_idxs.sum()
    U = array(U)[:, nonzero_idxs]
    # We also want S to be a single index array, which np.matrix
    # doesn't allow for. This is stripped by calling array() on it.
    S = sqrt(array(S)[nonzero_idxs])
    # Since NumPy returns V and not V+, we need to take the dagger
    # to get back to quantum info notation for Stinespring pairs.
    V = array(V.conj().T)[:, nonzero_idxs]

    # Next, we convert each of U and V into Kraus operators.
    # Finally, we want the Kraus index to be left-most so that we
    # can map over it when making Qobjs.
    # FIXME: does not preserve dims!
    kU = _svd_u_to_kraus(U, S, dL, dK, out_right, out_left)
    kV = _svd_u_to_kraus(V, S, dL, dK, in_right, in_left)

    return kU, kV
Example #34
    def distance(self, other):
        """Return distance to other Cuboid.

        Distance is the minimal Euclidean distance between points in self and points in other.

        :param other: other cuboid
        :type other: Cuboid

        :return: distance
        :rtype: float
        """
        dimension = len(self.low_corner)
        distance1 = self.low_corner - other.low_corner
        distance2 = self.low_corner - other.high_corner
        distance3 = self.high_corner - other.low_corner
        distance4 = self.high_corner - other.high_corner
        distance_matrix = array((distance1, distance2, distance3, distance4))
        checks = abs(numpy.sum(numpy.sign(distance_matrix),
                               0)) == 4 * numpy.ones(dimension)
        distance_vector = array(checks, dtype=float)
        min_vector = numpy.amin(abs(distance_matrix), axis=0)
        return numpy.linalg.norm(min_vector * distance_vector)
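The per-dimension test above works because the four corner differences all share a sign exactly when the intervals are disjoint along that axis; only then does the smallest |difference| contribute. A standalone repeat of the logic on raw corner arrays (the boxes are made up):

import numpy
from numpy import array

def cuboid_distance(low1, high1, low2, high2):
    # Same computation as Cuboid.distance, without the class.
    d = array((low1 - low2, low1 - high2, high1 - low2, high1 - high2))
    disjoint = abs(numpy.sum(numpy.sign(d), 0)) == 4 * numpy.ones(len(low1))
    return numpy.linalg.norm(numpy.amin(abs(d), axis=0) * disjoint)

# Unit squares 3 apart in x and overlapping in y: distance is 2.
print(cuboid_distance(array([0., 0.]), array([1., 1.]),
                      array([3., 0.5]), array([4., 1.5])))  # 2.0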
Example #36
    def convert_props(self, real_props=None, fake_props=None):
        new_props = []
        if real_props is not None:
            if len(real_props) > 0:
                assert isinstance(real_props[0], ThreatEnvironmentProperties)
                for real_prop in real_props:
                    assert isinstance(real_prop, ThreatEnvironmentProperties)
                    assert len(real_prop.theProperties) == len(real_prop.theRationale)
                    new_attrs = []
                    for idx in range(0, len(real_prop.theProperties)):
                        attr_name = self.rev_attr_dict.get(idx)
                        attr_value = self.prop_dict[real_prop.theProperties[idx]]
                        attr_rationale = real_prop.theRationale[idx]
                        new_attr = SecurityAttribute(attr_name, attr_value, attr_rationale)
                        new_attrs.append(new_attr)
                    real_prop.theProperties = new_attrs
                    new_props.append(real_prop)

            return new_props
        elif fake_props is not None:
            if len(fake_props) > 0:
                check_required_keys(fake_props[0], ThreatEnvironmentPropertiesModel.required)
                for fake_prop in fake_props:
                    check_required_keys(fake_prop, ThreatEnvironmentPropertiesModel.required)
                    new_ndprops = array([0]*8).astype(numpy.core.int32)
                    new_ratios = ['None']*8
                    for idx in range(0, len(fake_prop['theProperties'])):
                        new_attr = fake_prop['theProperties'][idx]
                        check_required_keys(new_attr, SecurityAttribute.required)
                        attr_id = self.attr_dict.get(new_attr['name'], -1)
                        if -1 < attr_id < len(self.attr_dict):
                            attr_value = self.rev_prop_dict[new_attr['value']]
                            attr_rationale = new_attr['rationale']
                            new_ndprops[attr_id] = attr_value
                            new_ratios[attr_id] = attr_rationale
                    fake_prop['theProperties'] = new_ndprops
                    fake_prop['theRationale'] = new_ratios
                    new_prop = ThreatEnvironmentProperties(
                        environmentName=fake_prop['theEnvironmentName'],
                        lhood=fake_prop['theLikelihood'],
                        assets=fake_prop['theAssets'],
                        attackers=fake_prop['theAttackers'],
                        pRationale=fake_prop['theRationale'],
                        syProperties=fake_prop['theProperties']
                    )
                    new_props.append(new_prop)

            return new_props
        else:
            self.close()
            raise MissingParameterHTTPError(param_names=['real_props', 'fake_props'])
Example #38
def encrypt(msg):
    mylist = []
    result = ""
    for i in msg:
        mylist.append(ord(i) - 65)
    plain = matrix(array(mylist).reshape(3, 4))
    cipher = plain * hkey
    division = cipher / 26
    division = division.astype(int)
    cipher %= 26
    for i in range(3):
        for j in range(4):
            result += chr(cipher.item(i, j) + 65)
    return (result, cipher, division)
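A round trip through encrypt() here and decrypt() from Example #31, assuming a hypothetical module-level key matrix hkey that both functions rely on; any invertible 4x4 integer matrix works, since decrypt() reconstructs the exact product before inverting:

from numpy import array, matrix
from numpy.linalg import inv

# Hypothetical key; the real hkey is not shown in these examples.
hkey = matrix([[1, 1, 0, 0],
               [0, 1, 0, 0],
               [0, 0, 1, 1],
               [0, 0, 0, 1]])

result, cipher, division = encrypt("HELLOHILLKEY")  # any 12 capital letters
print(decrypt(result, division))  # HELLOHILLKEY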
Example #39
def _svd_u_to_kraus(U, S, d, dK, indims, outdims):
    """
    Given a partial isometry U and a vector of square-roots of singular values S
    obtained from an SVD, produces the Kraus operators represented by U.

    Returns
    -------
    Ks : list of Qobj
        Quantum objects representing each of the Kraus operators.
    """
    # We use U * S since S is a 1-D array, so this is equivalent to
    # U . diag(S), but easier to write down.
    Ks = list(map(Qobj, array(U * S).reshape((d, d, dK), order='F').transpose((2, 0, 1))))
    for K in Ks:
        K.dims = [outdims, indims]
    return Ks
Example #41
def full(shape, fill_value, dtype=None, order='C'):
    """
    Return a new array of given shape and type, filled with `fill_value`.

    Parameters
    ----------
    shape : int or sequence of ints
        Shape of the new array, e.g., ``(2, 3)`` or ``2``.
    fill_value : scalar
        Fill value.
    dtype : data-type, optional
        The desired data-type for the array. The default, `None`, means
        `np.array(fill_value).dtype`.
    order : {'C', 'F'}, optional
        Whether to store multidimensional data in C- or Fortran-contiguous
        (row- or column-wise) order in memory.

    Returns
    -------
    out : ndarray
        Array of `fill_value` with the given shape, dtype, and order.

    See Also
    --------
    zeros_like : Return an array of zeros with shape and type of input.
    ones_like : Return an array of ones with shape and type of input.
    empty_like : Return an empty array with shape and type of input.
    full_like : Fill an array with shape and type of input.
    zeros : Return a new array setting values to zero.
    ones : Return a new array setting values to one.
    empty : Return a new uninitialized array.

    Examples
    --------
    >>> mp.full((2, 2), np.inf)
    micarray([[ inf,  inf],
           [ inf,  inf]])
    >>> mp.full((2, 2), 10)
    micarray([[10, 10],
           [10, 10]])

    """
    if dtype is None:
        dtype = array(fill_value).dtype
    a = empty(shape, dtype, order)
    multiarray.copyto(a, fill_value, casting='unsafe')
    return a
Example #42
def indicatorMapFromPolygon(value, polygon, squareSize):
    '''Fills an indicator map with the value within the polygon
    (array of Nx2 coordinates of the polygon vertices)'''
    import matplotlib.nxutils as nx
    from numpy.core.multiarray import array, arange
    from numpy import floor

    points = []
    for x in arange(min(polygon[:,0])+squareSize/2, max(polygon[:,0]), squareSize):
        for y in arange(min(polygon[:,1])+squareSize/2, max(polygon[:,1]), squareSize):
            points.append([x,y])
    inside = nx.points_inside_poly(array(points), polygon)
    indicatorMap = {}
    for i in xrange(len(inside)):
        if inside[i]:
            indicatorMap[(floor(points[i][0]/squareSize), floor(points[i][1]/squareSize))] = 0
    return indicatorMap
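matplotlib.nxutils was removed from matplotlib (around 1.3), so the snippet above no longer runs as written; a sketch of the same grid test using matplotlib.path.Path instead, mirroring the original (including storing 0 rather than the unused value argument):

import numpy as np
from matplotlib.path import Path

def indicator_map_from_polygon(value, polygon, square_size):
    # Grid of square centres covering the polygon's bounding box.
    xs = np.arange(polygon[:, 0].min() + square_size / 2,
                   polygon[:, 0].max(), square_size)
    ys = np.arange(polygon[:, 1].min() + square_size / 2,
                   polygon[:, 1].max(), square_size)
    points = np.array([[x, y] for x in xs for y in ys])
    inside = Path(polygon).contains_points(points)
    return {(np.floor(p[0] / square_size), np.floor(p[1] / square_size)): 0
            for p, ok in zip(points, inside) if ok}

square = np.array([[0., 0.], [10., 0.], [10., 10.], [0., 10.]])
print(len(indicator_map_from_polygon(0, square, 5.0)))  # 4 grid cells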
Example #43
def kraus_to_choi(kraus_list):
    """
    Takes a list of Kraus operators and returns the Choi matrix for the channel
    represented by the Kraus operators in `kraus_list`
    """
    kraus_mat_list = list(map(lambda x: x.data.toarray(), kraus_list))
    op_rng = range(kraus_mat_list[0].shape[0])
    choi_blocks = array([[
        sum([
            np.outer(op[:, c_ix],
                     np.transpose(np.conjugate(op))[r_ix, :])
            for op in kraus_mat_list
        ]) for r_ix in op_rng
    ] for c_ix in op_rng])
    return Qobj(inpt=hstack(hstack(choi_blocks)),
                dims=[kraus_list[0].dims[::-1], kraus_list[0].dims[::-1]],
                type='super',
                superrep='choi')
Example #44
 def convert_props(self, real_props=None, fake_props=None, rationales=None):
   prop_dict = {}
   prop_dict['None'] = 0
   prop_dict['Low'] = 1
   prop_dict['Medium'] = 2
   prop_dict['High'] = 3
   rev_prop_dict = {}
   rev_prop_dict[0] = 'None'
   rev_prop_dict[1] = 'Low'
   rev_prop_dict[2] = 'Medium'
   rev_prop_dict[3] = 'High'
   new_props = []
   if real_props is not None:
     if len(real_props) > 0:
       new_sec_attrs = []
       for idx in range(0, len(real_props)):
         try:
           attr_name = self.rev_attr_dict[idx]
           attr_value = rev_prop_dict[real_props[idx]]
           new_sec_attr = SecurityAttribute(attr_name, attr_value, rationales[idx])
           new_props.append(new_sec_attr)
         except LookupError:
           self.logger.warning('Unable to find key in dictionary. Attribute is being skipped.')
     return new_props
   elif fake_props is not None:
     if len(fake_props) > 0:
       new_props = array(8 * [0]).astype(numpy.int32)
       new_rationale = ['None'] * 8
       for sec_attr in fake_props:
         attr_id = self.attr_dict[sec_attr['name']]
         attr_value = prop_dict[sec_attr['value']]
         attr_rationale = sec_attr['rationale']
         new_props[attr_id] = attr_value
         new_rationale[attr_id] = attr_rationale
     return (new_props,new_rationale)
   else:
     self.close()
     raise MissingParameterHTTPError(param_names=['real_props', 'fake_props'])
Example #45
    if sctype is None:
        raise ValueError, "unrecognized type"
    return _sctype2char_dict[sctype]

# Create dictionary of casting functions that wrap sequences
# indexed by type or type character


cast = _typedict()
ScalarType = [_types.IntType, _types.FloatType,
              _types.ComplexType, _types.LongType, _types.BooleanType,
              _types.StringType, _types.UnicodeType, _types.BufferType]
ScalarType.extend(_sctype2char_dict.keys())
ScalarType = tuple(ScalarType)
for key in _sctype2char_dict.keys():
    cast[key] = lambda x, k=key : array(x, copy=False).astype(k)


_unicodesize = array('u','U1').itemsize

# Create the typestring lookup dictionary
_typestr = _typedict()
for key in _sctype2char_dict.keys():
    if issubclass(key, allTypes['flexible']):
        _typestr[key] = _sctype2char_dict[key]
    else:
        _typestr[key] = empty((1,),key).dtype.str[1:]

# Make sure all typestrings are in sctypeDict
for key, val in _typestr.items():
    if val not in sctypeDict:
Example #46
# indexed by type or type character


cast = _typedict()
try:
    ScalarType = [_types.IntType, _types.FloatType, _types.ComplexType,
                  _types.LongType, _types.BooleanType,
                   _types.StringType, _types.UnicodeType, _types.BufferType]
except AttributeError:
    # Py3K
    ScalarType = [int, float, complex, int, bool, bytes, str, memoryview]

ScalarType.extend(_sctype2char_dict.keys())
ScalarType = tuple(ScalarType)
for key in _sctype2char_dict.keys():
    cast[key] = lambda x, k=key : array(x, copy=False).astype(k)

# Create the typestring lookup dictionary
_typestr = _typedict()
for key in _sctype2char_dict.keys():
    if issubclass(key, allTypes['flexible']):
        _typestr[key] = _sctype2char_dict[key]
    else:
        _typestr[key] = empty((1,), key).dtype.str[1:]

# Make sure all typestrings are in sctypeDict
for key, val in _typestr.items():
    if val not in sctypeDict:
        sctypeDict[val] = key

# Add additional strings to the sctypeDict
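The k=key default argument in these loops is the usual trick for binding the loop variable at definition time; without it, every lambda would close over the final value of key. A minimal illustration:

funcs_late = [lambda x: x + i for i in range(3)]
funcs_bound = [lambda x, i=i: x + i for i in range(3)]
print([f(0) for f in funcs_late])   # [2, 2, 2] -- all see the final i
print([f(0) for f in funcs_bound])  # [0, 1, 2] -- i bound per iteration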
Example #47
 def convert_props(self, real_props=None, fake_props=None):
   new_props = []
   if real_props is not None:
     if len(real_props) > 0:
       for real_prop in real_props:
         assert isinstance(real_prop, CountermeasureEnvironmentProperties)
         ctList = []
         for ctc in real_prop.personas():
           ctList.append(CountermeasureTaskCharacteristics(ctc[0],ctc[1],ctc[2],ctc[3],ctc[4],ctc[5]))
         real_prop.thePersonas = ctList
         assert len(real_prop.theProperties) == len(real_prop.theRationale)
         new_attrs = []
         for idx in range(0, len(real_prop.theProperties)):
           attr_name = self.rev_attr_dict.get(idx)
           attr_value = self.prop_dict[real_prop.theProperties[idx]]
           attr_rationale = real_prop.theRationale[idx]
           new_attr = SecurityAttribute(attr_name, attr_value, attr_rationale)
           new_attrs.append(new_attr)
         real_prop.theProperties = new_attrs
         new_targets = []
         for idx in range(0, len(real_prop.theTargets)):
           tName = real_prop.theTargets[idx].name()
           tEff = real_prop.theTargets[idx].effectiveness()
           tRat = real_prop.theTargets[idx].rationale()
           new_targets.append(CountermeasureTarget(tName,tEff,tRat))
         real_prop.theTargets = new_targets
         new_props.append(real_prop)
     return new_props
   elif fake_props is not None:
     if len(fake_props) > 0:
       for fake_prop in fake_props:
         check_required_keys(fake_prop, CountermeasureEnvironmentPropertiesModel.required)
         ctList = []
         for ctc in fake_prop['thePersonas']:
           ctList.append([ctc['theTask'],ctc['thePersona'],ctc['theDuration'],ctc['theFrequency'],ctc['theDemands'],ctc['theGoalConflict']])
         fake_prop['thePersonas'] = ctList
         new_ndprops = array([0]*8).astype(numpy.core.int32)
         new_ratios = ['None']*8
         for idx in range(0, len(fake_prop['theProperties'])):
           new_attr = fake_prop['theProperties'][idx]
           check_required_keys(new_attr, SecurityAttribute.required)
           attr_id = self.attr_dict.get(new_attr['name'], -1)
           if -1 < attr_id < len(self.attr_dict):
             attr_value = self.rev_prop_dict[new_attr['value']]
             attr_rationale = new_attr['rationale']
             new_ndprops[attr_id] = attr_value
             new_ratios[attr_id] = attr_rationale
         fake_prop['theProperties'] = new_ndprops
         fake_prop['theRationale'] = new_ratios
         new_targets = []
         for idx in range(0, len(fake_prop['theTargets'])):
           tName = fake_prop['theTargets'][idx]['theName']
           tEff = fake_prop['theTargets'][idx]['theEffectiveness']
           tRat = fake_prop['theTargets'][idx]['theRationale']
           new_targets.append(CountermeasureTarget(tName,tEff,tRat))
         fake_prop['theTargets'] = new_targets
         check_required_keys(fake_prop, CountermeasureEnvironmentPropertiesModel.required)
         new_prop = CountermeasureEnvironmentProperties(
                      environmentName=fake_prop['theEnvironmentName'],
                      requirements=fake_prop['theRequirements'],
                      targets=fake_prop['theTargets'],
                      properties=fake_prop['theProperties'],
                      rationale=fake_prop['theRationale'],
                      cost=fake_prop['theCost'],
                      roles=fake_prop['theRoles'],
                      personas=fake_prop['thePersonas']
                    )
         new_props.append(new_prop)
     return new_props
   else:
     self.close()
     raise MissingParameterHTTPError(param_names=['real_props', 'fake_props'])
Example #48
def extract_noise(input_image_dir, input_annotation_dir, out_dir, callback, max_width=96, instance_id=None, num_instances=None):
    """
    @type input_image_dir: string
    @type input_annotation_dir: string
    @type out_dir: string
    @type callback: function
    """

    annotations = parse_annotations(input_annotation_dir)
    image_paths = search_files_by_extension(input_image_dir, ["JPG"])
    for image_path in image_paths:
        image = cv2.imread(image_path)
        image_filename = path_to_filename(image_path)

        # Distributed processing
        if instance_id is not None and num_instances is not None:
            if (int(hashlib.md5(image_path).hexdigest(), 16) % num_instances) != instance_id:
                continue
            print "instance id: {}. total number of instances {}".format(instance_id, num_instances)
            print "processing file {}".format(image_path)

        try:
            annotation = annotations[image_filename]
            image_info = annotation['image info']
        except KeyError:
            print "annotation is not available for {}".format(image_filename)
            continue

        (img_w, img_h) = image.shape[1], image.shape[0]
        if (img_w, img_h) != (image_info['w'], image_info['h']):
            image = cv2.resize(image, (image_info['w'], image_info['h']))

        theta = image_info['orientation']
        dthetas = [0, 15]
        for dtheta in dthetas:
            orientation = -theta + dtheta
            rotated_image = rotate_image(image, orientation)

            print(theta, orientation)

            positive_rects = []
            border_rects = None
            image_rect = [-image.shape[1]//2, -image.shape[0]//2, image.shape[1], image.shape[0]]
            for k, v in annotation['rects'].items():
                if k == 'border':
                    border_rects = v
                else:
                    positive_rects += v

            if border_rects is None:
                print('border rectangle must exist.')
                continue

            # index 0: original image, 1 to k: product border, k+1 to n-1: positive samples
            rects = np.array([image_rect] + border_rects + positive_rects)
            # change the origin to top left and handle rotations
            rects[:, 0] += rotated_image.shape[1]/2.0
            rects[:, 1] += rotated_image.shape[0]/2.0
            poly_rects = rect_to_polygon(rects)
            rotation_center = (rotated_image.shape[1]/2.0, rotated_image.shape[0]/2.0)
            rotated_poly_rects = rotate_rects(poly_rects, rotation_center, dtheta)

            p_image = rotated_poly_rects[0]
            p_borders = rotated_poly_rects[1:len(border_rects)+1]
            p_positives = rotated_poly_rects[len(border_rects)+1:]

            # plot_polygons_on_image(rotated_image, [image_poly, border_poly, positive_poly], ['red', 'blue', 'yellow'])

            r_img_w = rotated_image.shape[1]
            r_img_h = rotated_image.shape[0]

            min_size = 80
            max_size = min(r_img_w, r_img_h)
            window_size_growth = 1.6

            size = min_size
            while size <= max_size:
                skip_size = max(size // 2, 50)
                for x in range(0, r_img_w - size, skip_size):
                    for y in range(0, r_img_h - size, skip_size):
                        p_window = rect_to_polygon(np.array([(x, y, size, size)]))[0]
                        window_area = size**2 - 1  # subtract 1 in case of floating point errors
                        win_img_overlap = find_overlapping_polygon_area(p_window, p_image)
                        if win_img_overlap is None or win_img_overlap < window_area:
                            continue
                        skip = False
                        for p_border in p_borders:
                            win_border_overlap = find_overlapping_polygon_area(p_window, p_border)
                            if win_border_overlap is None or win_border_overlap < window_area*0.75:
                                skip = True
                                break
                        if skip:
                            continue
                        for p_positive in p_positives:
                            win_pos_overlap = find_overlapping_polygon_area(p_window, p_positive)
                            if win_pos_overlap is not None and win_pos_overlap > window_area*0.55:
                                skip = True
                                break
                        if skip:
                            continue

                        patch_info = {
                            'patch': {
                                'x': x,
                                'y': y,
                                'w': size,
                                'h': size,
                                'label': 'negative',
                                },
                            'source': {
                                'theta': theta,  # orientation
                                'dtheta': dtheta,
                                'path': image_path,
                                },
                            'out dir': out_dir
                        }

                        img_patch = crop_images(rotated_image, [(x, y, size-1, size-1)])[0]

                        if size > max_width:
                            img_patch = cv2.resize(img_patch, (max_width, max_width))

                        callback(img_patch, patch_info)

                size = int(size * window_size_growth)
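
A hypothetical usage sketch for extract_noise: the directory paths and the save_patch callback below are illustrative stand-ins (not from the source), assuming cv2 is available and each negative patch should be written to disk.

import os
import cv2

def save_patch(img_patch, patch_info):
    # Name the file after the source image and the patch geometry.
    base = os.path.splitext(os.path.basename(patch_info['source']['path']))[0]
    p = patch_info['patch']
    out_name = "{}_{}_{}_{}.jpg".format(base, p['x'], p['y'], p['w'])
    cv2.imwrite(os.path.join(patch_info['out dir'], out_name), img_patch)

extract_noise("images/", "annotations/", "negatives/", save_patch, max_width=96)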
Example #49
0
def reverse_1(input):
    # Converts the sequence to an array and doubles each element
    # (note: the parameter shadows the built-in input).
    input = array(input) * 2
    return input
Example #50
0
def asarray(a, typecode=None, dtype=None):
    dtype = convtypecode2(typecode, dtype)
    return mu.array(a, dtype, copy=0)
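
This mirrors modern numpy.asarray, which skips the copy when the input already has the requested dtype (an assumption here: mu is numpy's multiarray compatibility module, so copy=0 means "copy only if necessary"). A quick sketch using plain numpy:

import numpy as np

a = np.arange(4, dtype=np.float64)
b = np.asarray(a)   # no copy: b is the same object as a
c = np.array(a)     # always copies by default
assert b is a
assert c is not a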
Example #51
0
def optimize_sizes_cobyla(size_bins):
    likelihood_func = likelihood(size_bins)
    initial_guess = array(initial_x(size_bins))
    constraints = get_constraints(size_bins)
    return fmin_cobyla(likelihood_func, initial_guess, constraints, rhobeg=0.1, rhoend=0.01)
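
likelihood, initial_x, and get_constraints are defined elsewhere; as a self-contained sketch of the same call pattern, here is scipy's fmin_cobyla minimizing a toy objective under one inequality constraint (COBYLA treats each constraint function as satisfied where it is non-negative):

from numpy import array
from scipy.optimize import fmin_cobyla

def objective(x):
    # squared distance from the point (1, 2)
    return (x[0] - 1.0) ** 2 + (x[1] - 2.0) ** 2

def constraint(x):
    # feasible region: x[0] + x[1] <= 2, expressed as c(x) >= 0
    return 2.0 - x[0] - x[1]

x_opt = fmin_cobyla(objective, array([0.0, 0.0]), [constraint],
                    rhobeg=0.1, rhoend=1e-4)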
Example #52
0
def array(sequence, typecode=None, copy=1, savespace=0, dtype=None):
    dtype = convtypecode2(typecode, dtype)
    return mu.array(sequence, dtype, copy=copy)
Example #53
0
    def convert_props(self, real_props=None, fake_props=None):
        new_props = []
        if real_props is not None:
            if len(real_props) > 0:
                for real_prop in real_props:
                    assert isinstance(real_prop, AssetEnvironmentProperties)
                    asset_values = self.get_asset_values(real_prop.theEnvironmentName)
                    prop_dict = {}
                    for asset_value in asset_values:
                        prop_dict[asset_value.theId] = asset_value.theName

                    for idx in range(0, len(real_prop.theAssociations)):
                        real_prop.theAssociations[idx] = list(real_prop.theAssociations[idx])
                    sec_props = real_prop.theProperties
                    rationales = real_prop.theRationale

                    if len(sec_props) == len(rationales):
                        new_sec_attrs = []
                        for idx in range(0, len(sec_props)):
                            try:
                                attr_name = self.rev_attr_dict[idx]
                                attr_value = prop_dict[sec_props[idx]]
                                new_sec_attr = SecurityAttribute(attr_name, attr_value, rationales[idx])
                                new_sec_attrs.append(new_sec_attr)
                            except LookupError:
                                self.logger.warning('Unable to find key in dictionary. Attribute is being skipped.')
                        real_prop.theProperties = new_sec_attrs
                        delattr(real_prop, 'theRationale')

                    new_props.append(real_prop)
        elif fake_props is not None:
            if len(fake_props) > 0:
                for fake_prop in fake_props:
                    check_required_keys(fake_prop, AssetEnvironmentPropertiesModel.required)
                    asset_values = self.get_asset_values(fake_prop['theEnvironmentName'])
                    rev_prop_dict = {}
                    for asset_value in asset_values:
                        rev_prop_dict[asset_value.theName] = asset_value.theId

                    assert isinstance(fake_prop['theAssociations'], list)
                    for idx in range(0, len(fake_prop['theAssociations'])):
                        fake_prop['theAssociations'][idx] = tuple(fake_prop['theAssociations'][idx])
                    sec_attrs = fake_prop['theProperties']
                    new_syProps = array(8 * [0]).astype(numpy.int32)
                    new_rationale = ['None'] * 8

                    for sec_attr in sec_attrs:
                        attr_id = self.attr_dict[sec_attr['name']]
                        attr_value = rev_prop_dict[sec_attr['value']]
                        attr_rationale = sec_attr['rationale']
                        new_syProps[attr_id] = attr_value
                        new_rationale[attr_id] = attr_rationale

                    new_prop = AssetEnvironmentProperties(
                        environmentName=fake_prop['theEnvironmentName'],
                        syProperties=new_syProps,
                        pRationale=new_rationale,
                        associations=fake_prop['theAssociations']
                    )
                    new_props.append(new_prop)
        else:
            self.close()
            raise MissingParameterHTTPError(param_names=['real_props', 'fake_props'])

        return new_props
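
The fake_props branch packs named security attributes into a fixed eight-slot vector: attr_dict maps an attribute name to its slot index and rev_prop_dict maps a value name to its id. A minimal sketch of that mapping in isolation, with illustrative dictionaries (the real ones are loaded from the model):

import numpy

attr_dict = {'Confidentiality': 0, 'Integrity': 1, 'Availability': 2}
rev_prop_dict = {'None': 0, 'Low': 1, 'Medium': 2, 'High': 3}

sec_attrs = [{'name': 'Integrity', 'value': 'High', 'rationale': 'Tamper-evident logs'}]
syProps = numpy.array(8 * [0]).astype(numpy.int32)
rationale = ['None'] * 8
for sec_attr in sec_attrs:
    slot = attr_dict[sec_attr['name']]
    syProps[slot] = rev_prop_dict[sec_attr['value']]
    rationale[slot] = sec_attr['rationale']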
Example #54
0
def getIndexOfFirstFloat(eps, x):
    roundX = array([round(el) for el in x])
    return list(abs(roundX - x) < eps).index(False)
Example #55
0
def isIntegers(eps, x):
    roundX = array([round(el) for el in x])
    return (abs(roundX - x) < eps).all()
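
Both helpers use the same rounding trick: an element counts as an integer when it lies within eps of the nearest whole number, and getIndexOfFirstFloat raises ValueError when every element passes. A quick usage sketch, assuming numpy's array is in scope as above:

x = array([1.0, 2.0000001, 3.4])
print(isIntegers(1e-3, x))             # False: 3.4 is not near an integer
print(getIndexOfFirstFloat(1e-3, x))   # 2: index of the first non-integer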
Example #56
0
    def __init__(self):
        super(HistogramBaseController, self).__init__()
        self.readings_array = array([])
Example #57
0
def build_result_vector(results):
    return array([int(x['goalsHome']) - int(x['goalsAway']) for x in results])
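
Each result row is reduced to a signed goal difference, positive for a home win and negative for an away win. A usage sketch with illustrative records:

results = [{'goalsHome': '2', 'goalsAway': '1'},
           {'goalsHome': '0', 'goalsAway': '3'}]
print(build_result_vector(results))  # -> [ 1 -3]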