Example #1
    def check_new_event(self) -> None:
        """
        检测是否有新事件产生
        如果有则加入到处理队列中

        处理逻辑是:新生成的时间对应的时间戳 晚于 之前所有的生成的事件的时间
        """
        check_path = os.path.join(self.event_root, self.area_name,
                                  self.process_date)
        # event_root/area_name/YYYYMMDD/event_name/
        for event_name in self.last_event_timestamp:
            event_path = os.path.join(check_path, event_name)
            state_file = os.path.join(event_path, "state.npy")
            if not os.path.exists(state_file):
                res = numpy.array(["", "", "", ""])
                numpy.save(state_file, res)
            # the state file holds a 1-D record after the first save,
            # so promote it to 2-D rows before comparing
            state = numpy.atleast_2d(numpy.load(state_file))
            for timestamp in os.listdir(os.path.join(event_path, "time_info")):
                event = os.path.join(event_path, "time_info", timestamp)
                event_folder_tree = event.split("/")
                res = numpy.array([
                    event_folder_tree[1], event_folder_tree[2],
                    event_folder_tree[3],
                    event_folder_tree[5].replace(".npy", "")
                ])
                exist = False
                for row in state:
                    if (row[0] == res[0] and row[1] == res[1]
                            and row[2] == res[2] and row[3] == res[3]):
                        exist = True
                        break
                if not exist:
                    self.put(event)
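For reference, the magic indices 1, 2, 3 and 5 above assume the event path is relative and event_root is a single path component; a quick standalone check (the concrete path below is made up):

# Illustrative only: why event.split("/") is indexed at positions 1, 2, 3 and 5
parts = "events/area1/20240101/evtA/time_info/1700000000.npy".split("/")
print(parts[1], parts[2], parts[3], parts[5])
# -> area1 20240101 evtA 1700000000.npy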
Example #2
def img_callback(msg):
    img = bridge.compressed_imgmsg_to_cv2(msg, 'bgr8')  # load image
    # convert BGR -> HSV (hue, saturation, value); HSV is more robust
    # to lighting changes than raw BGR/RGB
    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)

    lower_yellow = np.array([20, 100, 100])  # yellow range
    upper_yellow = np.array([30, 255, 255])  # yellow range
    # red -> [0,100,100] , [0,100,60]
    mask = cv2.inRange(hsv, lower_yellow, upper_yellow)

    h, w, d = img.shape
    search_top = 3 * h // 4
    search_bot = 3 * h // 4 + 20
    # zero out everything outside the search_top ~ search_bot band
    mask[0:search_top, 0:w] = 0
    mask[search_bot:h, 0:w] = 0

    M = cv2.moments(mask)
    if M['m00'] > 0:
        # find the centroid of the detected color region
        cx = int(M['m10'] / M['m00'])
        cy = int(M['m01'] / M['m00'])
        cv2.circle(img, (cx, cy), 20, (0, 0, 255), -1)
        # horizontal offset between the color centroid and the image center
        err = cx - w / 2
        g_twist.linear.x = 0.07
        g_twist.angular.z = -float(
            err) / 1000  # follow color, +float(err) -> avoid color
Example #3
def gen_test_output(sess, logits, keep_prob, image_pl, data_folder,
                    image_shape):
    """
	Generate test output using the test images
	:param sess: TF session
	:param logits: TF Tensor for the logits
	:param keep_prob: TF Placeholder for the dropout keep probability
	:param image_pl: TF Placeholder for the image placeholder
	:param data_folder: Path to the folder that contains the datasets
	:param image_shape: Tuple - Shape of image
	:return: Output for for each test image
	"""
    for image_file in glob(os.path.join(data_folder, 'image_2', '*.png')):
        image = np.array(Image.open(image_file).resize(image_shape))

        # Run inference
        im_softmax = sess.run([tf.nn.softmax(logits)], {
            keep_prob: 1.0,
            image_pl: [image]
        })
        # Splice out second column (road), reshape output back to image_shape
        im_softmax = im_softmax[0][:, 1].reshape(image_shape[0],
                                                 image_shape[1])
        # If road softmax > 0.5, prediction is road
        segmentation = (im_softmax > 0.5).reshape(image_shape[0],
                                                  image_shape[1], 1)
        # Create mask based on segmentation to apply to original image
        mask = np.dot(segmentation, np.array([[0, 255, 0, 127]]))
        mask = scipy.misc.toimage(mask, mode="RGBA")
        street_im = scipy.misc.toimage(image)
        street_im.paste(mask, box=None, mask=mask)

        yield os.path.basename(image_file), np.array(street_im)
Example #4
def test_fidupks(shotnumber, n, fidu, dell, kind, show, start):
    fidutr = []  # column sums of fidu
    for i in range(len(fidu[1, :])):
        col_sum = 0
        for j in range(len(fidu[:, 1])):
            col_sum += fidu[j, i]
        fidutr.append(col_sum)
    fidutr = np.array(fidutr)
Example #5
    def __plotImg(img):
        print(np.array(img).shape)
        print(np.array(img).dtype)
        print(np.array(img).size)
        print(type(np.array(img)))
        plt.imshow(img)
        plt.axis('off')
        plt.show()
Example #6
def is_event_long_enough(df_dm, df_dmu, df_df, df_dfs, TXx_idx,
                         TXx_idx_minus_four, Tairs, Qles, GPPs, rain, vpds,
                         wues, idx_yrs, idx_doy):

    while len(Tairs) != 4:

        # Drop this event as it wasn't long enough
        df_dmu = df_dmu[(df_dm.index < TXx_idx_minus_four) |
                        (df_dm.index > TXx_idx)]
        df_dm = df_dm[(df_dm.index < TXx_idx_minus_four) |
                      (df_dm.index > TXx_idx)]
        df_df = df_df[(df_df.index < TXx_idx_minus_four) |
                      (df_df.index > TXx_idx)]

        TXx = df_dm.sort_values("Tair", ascending=False)[:1].Tair.values[0]
        TXx_idx = df_dm.sort_values("Tair",
                                    ascending=False)[:1].index.values[0]
        TXx_idx_minus_four = TXx_idx - pd.Timedelta(3, unit='d')

        # Need this to screen for rain in the 48 hours before the event, i.e.
        # to remove soil evap contributions
        TXx_idx_minus_six = TXx_idx - pd.Timedelta(5, unit='d')

        (Tairs, Qles, Qhs, GPPs, vpds, wues, idx_yrs,
         idx_doy) = get_values(df_dm, df_dmu, df_df, df_dfs, TXx_idx,
                               TXx_idx_minus_four, TXx_idx_minus_six)

        (Tairs, Qles, GPPs, vpds, wues, df_dm, df_dmu, df_df, idx_yrs,
         idx_doy) = check_for_rain(rain, TXx_idx_minus_four, TXx_idx_minus_six,
                                   TXx_idx, df_dm, df_dmu, df_df, df_dfs,
                                   Tairs, Qles, Qhs, GPPs, vpds, wues, idx_yrs,
                                   idx_doy)

        if len(df_dm) <= 4:
            Tairs = np.array([np.nan, np.nan, np.nan, np.nan])
            Qles = np.array([np.nan, np.nan, np.nan, np.nan])
            GPPs = np.array([np.nan, np.nan, np.nan, np.nan])
            vpds = np.array([np.nan, np.nan, np.nan, np.nan])
            wues = np.array([np.nan, np.nan, np.nan, np.nan])
            idx_yrs = np.array([np.nan, np.nan, np.nan, np.nan])
            idx_doy = np.array([np.nan, np.nan, np.nan, np.nan])
            break

    return (Tairs, Qles, GPPs, vpds, wues, df_dm, df_dmu, df_df, idx_yrs,
            idx_doy)
Example #7
    def class_determiner(self):
        N_classes = np.array(self.K * [0])
        for i in range(len(neighbours[0])):
            N_classes[i] = y[neighbours[i][0]]

        # majority vote: return the most frequent class among the neighbours
        unique, counts = np.unique(N_classes, return_counts=True)
        pred = np.argmax(counts)
        return unique[pred]
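For reference, a small standalone demo of the majority-vote step used above (the labels are made up):

import numpy as np

N_classes = np.array([2, 0, 2, 1, 2])
unique, counts = np.unique(N_classes, return_counts=True)
print(unique[np.argmax(counts)])  # -> 2, the most frequent label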

        
Example #8
    def update_state(self, event: str):
        event_folder = os.path.dirname(os.path.dirname(event))
        event_folder_tree = event.split("/")
        state_file_path = os.path.join(event_folder, "state.npy")
        res = numpy.array([
            event_folder_tree[1], event_folder_tree[2], event_folder_tree[3],
            event_folder_tree[5].replace(".npy", "")
        ])
        if not os.path.exists(state_file_path):
            numpy.save(state_file_path, res)
        else:
            state = numpy.load(state_file_path)
            # vstack expects a single sequence of arrays
            state = numpy.vstack((state, res))
            numpy.save(state_file_path, state)
Example #9
    def select_q_with_dropout(self, s_t, a_t):
        dropout_qs = np.array([])

        with torch.no_grad():
            for i in range(self.dropout_n):
                q_batch = to_numpy(
                    self.critic.forward_with_dropout([
                        to_tensor(s_t), to_tensor(a_t)
                    ]).squeeze(0)[:-1])  # ignore aleatoric variance term
                dropout_qs = np.append(dropout_qs, [q_batch])

        # dropout_qs is a NumPy array, so use NumPy statistics here
        q_mean = np.mean(dropout_qs)
        q_var = np.var(dropout_qs)

        return q_mean, q_var
Example #10
def AirplaneSeatingSim(order):
    global time
    positions = np.linspace(
        0, -1 * len(order), num=len(order), endpoint=False
    ).astype(
        int
    )  #position in line relative to 1 as the first row in the plane (they all start behind the first row)
    rows = np.array([row[0]
                     for row in order])  # each individual's assigned row
    seated = emptyPlane(order)
    while (len(order) != 0):
        positions += 1  #Move Everyone forward
        time += 1  #Increment time, Work out units later (1 time unit is 2 seconds)
        order, seated = seating(positions, rows, order, seated)
    return time
Example #11
def _get_features():
    seq1 = INPUT_SEQ1
    seq1_vec = np.array(list(vp.transform([seq1.encode('utf-8')]))[0])
    seq1_len = len(seq1.split('|'))
    features = {
        'seq1': [],
        'seq1_len': [],
        'seq2': [],
        'seq2_len': []
    }
    for seq2 in POTENTIAL_RESPONSE:
        seq2_vec = np.array(list(vp.transform([seq2.encode('utf-8')]))[0])
        seq2_len = len(seq2.split('|'))
        features['seq1'].append(tf.convert_to_tensor(seq1_vec, dtype=tf.int64))
        features['seq1_len'].append(tf.constant(seq1_len, shape=[1,1], dtype=tf.int64))
        features['seq2'].append(tf.convert_to_tensor(seq2_vec, dtype=tf.int64))
        features['seq2_len'].append(tf.constant(seq2_len, shape=[1,1], dtype=tf.int64))

    return features
Example #12
def get_ref_pos(elt, pt):
    """Return the reference position of a real point."""
    # tetrahedron
    v = np.array(pt - elt.nodes_pos[0, :], dtype=float)
    ref_pos = np.dot(v, elt.jacob_inv)
    return ref_pos
Example #13
def Prepare_TestData(X, testing_RUL, settings_min_max_mean, 
                     sensors_min_max_mean, time_of_event_max, centering, 
                     normalization, dt, all_in_one):
    """
    This function prepares the testing data for model evaluation.
    
    :type X: Pandas Dataframe
    :param X: Test dataframe; each row corresponds to an engine at a specific cycle, with its operational parameters and sensor measurements at that cycle
    
    :type testing_RUL: Pandas Dataframe
    :param testing_RUL: remaining useful life for every engine (at the end of cycle) in the test data
    
    :type settings_min_max_mean: dictionary
    :param settings_min_max_mean: operational settings features
    
    :type sensors_min_max_mean: dictionary
    :param sensors_min_max_mean: sensor measurements features
    
    :type time_of_event_max: float
    :param time_of_event_max: maximum value of cycle from any engine in training data
    
    :type centering: boolean (True/False)
    :param centering: whether input data is min-max normalized and centered for better learning
    
    :type normalization: boolean (True/False)
    :param normalization: whether output label data is normalized
    
    :type dt: integer (e.g. 10, 20, 30 etc.)
    :param dt: span of cycles forming a single input to be learned by the model
    
    :type all_in_one: boolean (True/False)
    :param all_in_one: whether test data is to be prepared separately for every engine or combined; the separate option is better as it makes the model-evaluation results easier to understand
    
    :return: tuple (Test input data, Test true label data, engines label for test input data, all_in_one to be used in any following functions)
    """
    # calculate 'time_to_event' column for testing data using given RUL 
    X['time_to_event'] = np.nan
    for engine in X['engine_id'].unique():
        cycle_max = X[X['engine_id'] == engine]['cycle'].max()
        RUL_at_end = int(testing_RUL[testing_RUL['engine_id'] == engine]['RUL'].values[0])
        X.loc[X['engine_id'] == engine, 'time_to_event'] = np.array(range(cycle_max - 1 + RUL_at_end, RUL_at_end - 1, -1))
    
    # initiate testing data preparation
    data_testing = X.copy()
    
    # column names are needed below even when centering is off
    settings = ['setting1', 'setting2', 'setting3']
    sensors = ['s%s' % i for i in range(1, 22)]

    if centering:
        print('[Info] Centering the input test data...')
        # centering for settings columns
        settings_min = settings_min_max_mean['min']
        settings_max = settings_min_max_mean['max']
        settings_mean = settings_min_max_mean['mean']
        for setting in settings:
            data_testing[setting] = (X[setting] - settings_mean[setting])/(settings_max[setting] - settings_min[setting])

        # centering for sensors columns
        sensors_min = sensors_min_max_mean['min']
        sensors_max = sensors_min_max_mean['max']
        sensors_mean = sensors_min_max_mean['mean']
        for sensor in sensors:
            data_testing[sensor] = (X[sensor] - sensors_mean[sensor])/(sensors_max[sensor] - sensors_min[sensor])
    
    if normalization:
        print('[Info] Normalizing the remaining useful life of test data...')
        data_testing['time_to_event'] = X['time_to_event']/time_of_event_max
    
    # prepare testing data - 'X' signifies input data and 'Y' signifies output label data
    if all_in_one: # assimilate all the data regardless of engine_id
        Xtesting, Ytesting, engines = [], [], []
        for engine in data_testing['engine_id'].unique():
            cycle_max = data_testing[data_testing['engine_id'] == engine]['cycle'].max()
            if cycle_max > (dt+5): # at least 5 samples from the engine are expected
                for i in range(cycle_max - dt + 1):
                    select_Xdata = data_testing[data_testing['engine_id'] == engine][settings+sensors][i:i+dt].to_numpy()
                    Xtesting.append(select_Xdata)
                    select_Ydata = data_testing[data_testing['engine_id'] == engine]['time_to_event'].iloc[i+dt-1]
                    Ytesting.append(select_Ydata)
                    engines.append(engine)
        Xtesting = np.array(Xtesting)
        Ytesting = np.array(Ytesting)
        engines = np.array(engines)
    else: # assimilate test data for every engine_id. This option will help while evaluating RUL for specific engine_id
        print('[Info] Assimilating test data for every engine...')
        Xtesting, Ytesting, engines = [], [], []
        for engine in data_testing['engine_id'].unique():
            Xtest_engine, Ytest_engine = [], []
            cycle_max = data_testing[data_testing['engine_id'] == engine]['cycle'].max()
            if cycle_max > (dt+5): # at least 5 samples from the engine are expected
                for i in range(cycle_max - dt + 1):
                    select_Xdata = data_testing[data_testing['engine_id'] == engine][settings+sensors][i:i+dt].to_numpy()
                    Xtest_engine.append(select_Xdata)
                    select_Ydata = data_testing[data_testing['engine_id'] == engine]['time_to_event'].iloc[i+dt-1]
                    Ytest_engine.append(select_Ydata)
                Xtesting.append(np.array(Xtest_engine))
                Ytesting.append(np.array(Ytest_engine))
                engines.append(engine)
        Xtesting = np.array(Xtesting)
        Ytesting = np.array(Ytesting)
        engines = np.array(engines)
    return (Xtesting, Ytesting, engines, all_in_one)
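To make the role of dt concrete, here is a minimal standalone sketch of the sliding-window slicing used above, on toy data (the column names here are illustrative, not the real dataset's):

import numpy as np
import pandas as pd

toy = pd.DataFrame({'engine_id': [1] * 8,
                    'cycle': range(1, 9),
                    's1': np.arange(8.0)})
dt = 3
# one (dt x n_features) window per step, mirroring the inner loop above
windows = [toy[['s1']][i:i + dt].to_numpy() for i in range(len(toy) - dt + 1)]
print(len(windows), windows[0].shape)  # -> 6 (3, 1)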
Example #14

Regression is a technique for understanding the relationship between
variables and how each contributes to producing a particular outcome.


Linear regression - assumes a linear relationship between the variables

Single-variable linear regression models the relationship between a
single independent input variable and a dependent output variable
using a linear model, i.e. a line:
Y = m*X + c
Multi-variable linear regression models the relationship between
multiple independent input variables and a dependent output variable:
Y = a_1*X1 + a_2*X2 + c
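As a quick illustration, a minimal sketch of fitting the single-variable case with NumPy (the data points are made up):

import numpy as np

# made-up points lying roughly on Y = 2*X + 1
X = np.array([0.0, 1.0, 2.0, 3.0, 4.0])
Y = np.array([1.1, 2.9, 5.2, 7.0, 9.1])

# a degree-1 polyfit returns the slope m and intercept c of Y = m*X + c
m, c = np.polyfit(X, Y, 1)
print(m, c)  # approximately 2 and 1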

numpy - Numerical Python
Pure Python is slow in execution; NumPy speeds up Python operations
by moving repetitive element-wise work into compiled code.
import numpy as np
print("NumPy imported successfully")
intarray = np.array([1, 2, 3, 4], int)

print(intarray[2])
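To see the speed claim concretely, a small comparison of a pure-Python loop against the equivalent compiled NumPy reduction (exact timings will vary by machine):

import time
import numpy as np

data = np.arange(1_000_000, dtype=float)

start = time.perf_counter()
total = 0.0
for x in data:  # interpreted Python loop
    total += x
loop_time = time.perf_counter() - start

start = time.perf_counter()
total_np = data.sum()  # compiled NumPy reduction
numpy_time = time.perf_counter() - start

print(loop_time, numpy_time)  # the NumPy sum is typically orders of magnitude faster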
Example #15
    def fit(self, X):
        assert X.ndim == 2, 'the dimension of X must be 2'
        # per-column mean and standard deviation
        self.mean_ = np.array([np.mean(X[:, i]) for i in range(X.shape[1])])
        self.scale_ = np.array([np.std(X[:, i]) for i in range(X.shape[1])])
        return self
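For reference, the per-column statistics that fit() computes can be checked standalone on a tiny matrix (the values are illustrative); note that np.mean(X, axis=0) and np.std(X, axis=0) compute the same thing without the Python loop:

import numpy as np

X = np.array([[1.0, 10.0],
              [2.0, 20.0],
              [3.0, 30.0]])
mean_ = np.array([np.mean(X[:, i]) for i in range(X.shape[1])])
scale_ = np.array([np.std(X[:, i]) for i in range(X.shape[1])])
print(mean_)   # [ 2. 20.]
print(scale_)  # per-column standard deviations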
Example #16
# how to do benchmarking
# conclusion
ds.info()
ds['quality'] = pd.Categorical(ds['quality'])
ds.info()
set(ds.quality)
plt.figure(figsize=(10, 6))
sns.distplot(ds["fixed acidity"])
plt.figure(figsize=(10, 6))
sns.distplot(ds["volatile acidity"])
plt.figure(figsize=(10, 6))
sns.boxplot(ds["fixed acidity"]
            )  #just to check how outliers are handled by Decision Tree
X = ds.drop(columns=['quality']).to_numpy()  # .to_numpy() or np.array will do the same thing
y = np.array(ds['quality'])
Xtrain, Xtest, Ytrain, Ytest = train_test_split(X,
                                                y,
                                                test_size=0.20,
                                                random_state=1234)
lm = LogisticRegression()
lm
lm.fit(Xtrain, Ytrain)
lm.predict(Xtest)
lm.score(Xtrain, Ytrain)
lm.score(Xtest, Ytest)

pred_quality = lm.predict(Xtest)
confusion_matrix(Ytest, pred_quality)
print(classification_report(Ytest, pred_quality))
Example #17
np.save('processed_np/test_ben_cont.npy', test_ben_cont)

test_van_cont = np.array(test_vandals[['day', 'hour', 'minute', 'second']])
np.save('processed_np/test_van_cont.npy', test_van_cont)

data = train_benigns.append(test_benigns).append(test_vandals)
del train_benigns, test_benigns, test_vandals
gc.collect()

# Drop labels
data.drop(['day', 'hour', 'minute', 'second', 'is_attributed'],
          axis=1,
          inplace=True)

# To categorical
data = data.apply(LabelEncoder().fit_transform)

train_benigns = data[:len1]
test_benigns = data[len1:len1 + len2]
test_vandals = data[-len3:]

# One-hot encoding
enc = OneHotEncoder()
train_benigns = enc.fit_transform(train_benigns)
test_benigns = enc.transform(test_benigns)
test_vandals = enc.transform(test_vandals)

np.save('processed_np/train_cat_sparse.npy', np.array(train_benigns))
np.save('processed_np/test_ben_cat_sparse.npy', np.array(test_benigns))
np.save('processed_np/test_van_cat_sparse.npy', np.array(test_vandals))
Example #18
                    pre_l = l

                # update inner best attack
                for e, (dlt, pred, img) in enumerate(zip(dlts, preds, nimg)):
                    if dlt < mindlt[e] and np.argmax(pred) != batch_Y[e]:
                        mindlt[e] = dlt
                    if dlt < o_mindlt[e] and np.argmax(pred) != batch_Y[e]:
                        o_mindlt[e] = dlt
                        o_bestatt[e] = img
                        o_findatt[e] = True

            # update const based on evaluation
            for e in range(B_SIZE):
                if o_findatt[e]:
                    # find adversarial example, current const is upper bound
                    const_ub[e] = min(const_ub[e], batch_const[e])
                    batch_const[e] = (const_lb[e] + const_ub[e]) / 2.
                else:
                    # doesn't find adversarial example, current const is lower bound
                    const_lb[e] = max(const_lb[e], batch_const[e])
                    batch_const[e] = (const_lb[e] + const_ub[e]) / 2.

        # store the best attack of current batch
        final_att.extend(o_bestatt)
        final_status.extend(o_findatt)

    print('========= Saving Results =========')
    # store the attacking result
    np.save('adversarial_examples.npy', np.array(final_att))
    np.save('searching_result.npy', np.array(final_status))