Example #1
import numpy as np
import pandas as pd

import funcs  # project helper module providing normalize()


def randomseries(n):
    '''
    Stochastic Time Series Generator - V.1.2 by R.R.Rosa
    A non-Gaussian random generator with no universality class via the PDF.
    Input: n = number of points in the series
    res: resolution
    '''
    res = n / 12
    df = pd.DataFrame(np.random.randn(n) * np.sqrt(res) * np.sqrt(1 / 128.)).cumsum()
    a = df[0].tolist()
    a = funcs.normalize(a)  # rescale via the project's helper
    x = range(n)
    return x, a
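
# Usage sketch (hypothetical; matplotlib is an assumption, used only for
# illustration):
import matplotlib.pyplot as plt

x, a = randomseries(8192)
plt.plot(x, a)
plt.title('randomseries(8192)')
plt.show()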
Example #2
import numpy as np
import pandas as pd
from sklearn.preprocessing import LabelEncoder, StandardScaler
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import train_test_split

import funcs  # project helper module providing normalize()


data = pd.read_csv('IoT.csv', delimiter=',')
labels = pd.DataFrame(data, columns=['label'])
data = pd.DataFrame(data, columns=['duration', 'orig_bytes', 'orig_pkts', 'proto',
                                   'resp_bytes', 'conn_state', 'resp_pkts'])

quantity = ['duration', 'orig_bytes', 'orig_pkts', 'resp_bytes', 'resp_pkts']

data = funcs.normalize(data, quantity)

data = pd.get_dummies(data, columns=['proto', 'conn_state'])

# Flag values outside the 3-sigma band of each numeric column as NaN,
# then drop those rows below.
trust_old = dict()

for index in quantity:
    trust_old[index] = [np.mean(data[index]) - (3 * np.std(data[index])),
                        np.mean(data[index]) + (3 * np.std(data[index]))]
    data.loc[(data[index] > trust_old[index][1]) | (data[index] < trust_old[index][0]), index] = np.nan

data = data.dropna()

le = LabelEncoder()
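
# Hedged sketch of how this snippet plausibly continues, inferred from the
# imports above (not part of the original): encode the string labels, split,
# and fit the k-NN classifier. n_neighbors=5 is an assumed default.
y = le.fit_transform(labels.loc[data.index, 'label'])
X_train, X_test, y_train, y_test = train_test_split(
    data, y, test_size=0.2, random_state=0)
knn = KNeighborsClassifier(n_neighbors=5)
knn.fit(X_train, y_train)
print('test accuracy:', knn.score(X_test, y_test))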
Example #3
import pandas as pd

# `baseLoc`, `pk` (the index column), `main`, `tar` and `target` are defined
# earlier in the original script.
testSample = pd.read_csv(baseLoc + 'dataManagerFiles/train/' +
                         "testwithCovars.csv",
                         index_col=pk)
testSample.columns = [
    name.replace("testWithDummies", "mainWithDummies")
    for name in testSample.columns
]

main = main.fillna(main.mean())

testSample = testSample.fillna(main.mean())
main, testSample = normalize(main.drop(target, axis=1), testSample)
nanColumns = main.columns[main.isna().any()].tolist()
main = main.drop(nanColumns, axis=1)
testSample = testSample.drop(nanColumns, axis=1)
main = main.join(tar)
# Hold out a random 20% of main as the test split.
test = main.sample(n=int(main.shape[0] * 0.20), random_state=0)
train = main.drop(test.index, axis=0)
test_y = test[[target]]
train_y = train[[target]]
train = train.drop(target, axis=1)
test = test.drop(target, axis=1)
X = train
y = train_y

varSelected = [
    'Age0mainWithDummies', 'TicketMeanT0mainWithDummies',
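
# `normalize(train_df, test_df)` is a project helper not shown in this
# corpus (it is also used in Example #4 below). A minimal sketch under that
# assumption: fit scaling statistics on the first frame only and apply them
# to both, avoiding test-set leakage. Columns with zero spread become NaN,
# which matches the nanColumns cleanup above.
def normalize(train_df, test_df):
    mean, std = train_df.mean(), train_df.std()
    return (train_df - mean) / std, (test_df - mean) / std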
Example #4
import pandas as pd
from sklearn.ensemble import AdaBoostClassifier

# `train` and `test` (when validation == 0) come from earlier in the
# original script. Set validation to 1 to carve a 10,000-row hold-out
# out of train instead.
validation = 0
if validation == 1:
    test = train.sample(n=10000)
    train = train.drop(test.index, axis=0)
train_y = train['TARGET']
test_y = test['TARGET']

# Duplicate entries removed from the original list ('EXT_SOURCE_1..3',
# 'utilization_-6_mean_max' and 'payTominDue_-6_mean_mean' appeared twice).
varSelected = [
    'EXT_SOURCE_3', 'EXT_SOURCE_2', 'EXT_SOURCE_1', 'DAYS_EMPLOYED',
    'AMT_GOODS_PRICE', 'DAYS_CREDIT_min', 'PRODUCT_d_mean',
    'REGION_RATING_CLIENT_W_CITY', 'lowOccupation', 'utilization_-6_mean_max',
    'payTominDue_-6_mean_mean'
]  #,'active_sumbur','active_mean'
train, test = normalize(train[varSelected], test[varSelected])
train['TARGET'] = train_y
test['TARGET'] = test_y
train.describe().to_csv(
    "/home/pooja/PycharmProjects/datanalysis/finalDatasets/des.csv")

abc = AdaBoostClassifier(n_estimators=200, learning_rate=1)
# Train the AdaBoost classifier
mlp = abc.fit(train[varSelected], train['TARGET'])

# Predict class probabilities and keep the positive-class column ('TARGET').
trainer = pd.DataFrame(mlp.predict_proba(train[varSelected].values),
                       columns=['good', 'TARGET'],
                       index=train.index)[['TARGET']]
# The original snippet is cut off mid-call here; the test-set frame is
# completed to mirror `trainer`.
submission = pd.DataFrame(mlp.predict_proba(test[varSelected].values),
                          columns=['good', 'TARGET'],
                          index=test.index)[['TARGET']]
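
# Hedged evaluation sketch (not in the original): score the hold-out split
# with ROC AUC, the usual metric for a rare-event binary TARGET.
from sklearn.metrics import roc_auc_score

print('test AUC:', roc_auc_score(test_y['TARGET'], submission['TARGET']))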
Example #5
import os
import pickle

import cv2
import mtcnn
import numpy as np
from keras.models import load_model
from sklearn.preprocessing import Normalizer

# `encoder_model`, `people_dir`, `required_size`, `encodings_path` and the
# helpers align(), normalize() and get_encode() are defined elsewhere in the
# original project. l2_normalizer is plausibly sklearn's L2 Normalizer:
l2_normalizer = Normalizer('l2')

face_detector = mtcnn.MTCNN()
face_encoder = load_model(encoder_model)

encoding_dict = dict()

for person_name in os.listdir(people_dir):
    person_dir = os.path.join(people_dir, person_name)
    encodes = []
    for img_name in os.listdir(person_dir):
        img_path = os.path.join(person_dir, img_name)
        img = cv2.imread(img_path)
        img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        results = face_detector.detect_faces(img_rgb)
        if results:
            # Keep the largest detected face in the image.
            box = max(results, key=lambda b: b['box'][2] * b['box'][3])
            l_e = box['keypoints']['left_eye']
            r_e = box['keypoints']['right_eye']
            face = align(img, l_e, r_e, size=required_size, eye_pos=(0.35, 0.4))
            face = normalize(face)
            encode = get_encode(face_encoder, face, required_size)
            encodes.append(encode)
    if encodes:
        # Sum the per-image encodings and L2-normalize into one
        # representative vector per person.
        encode = np.sum(encodes, axis=0)
        encode = l2_normalizer.transform(encode.reshape(1, -1))
        encoding_dict[person_name] = encode[0]
# List the people that were encoded.
for name in encoding_dict:
    print(name)

with open(encodings_path, 'wb') as file:
    pickle.dump(encoding_dict, file)
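
# Hedged usage sketch (hypothetical): load the saved encodings and match a
# new, already-encoded face to the closest known person by cosine distance.
from scipy.spatial.distance import cosine

with open(encodings_path, 'rb') as file:
    known = pickle.load(file)

def identify(face_encode, known, threshold=0.5):
    # Return the closest name within the distance threshold, else 'unknown'.
    best_name, best_dist = 'unknown', threshold
    for name, encode in known.items():
        dist = cosine(face_encode, encode)
        if dist < best_dist:
            best_name, best_dist = name, dist
    return best_name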