Example #1
    def __init__(self, kinds):

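        # Candidate hyperparameter values for the linear-kernel SVM.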
        self.svm_param = {
            'gamma': ['scale', 'auto'],
            'tol': [1e-6, 1e-5, 1e-4, 1e-3, 1e-2, 1e-1],
            'C': [
                1e-5, 1e-4, 1e-3, 1e-2, 0.1, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 20,
                30
            ],
        }

        self.svm = SVC(kernel='linear')

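        # Output paths for the RFECV score curve and the feature-selection plot.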
        dirpath = os.getcwd()
        self.cvSavepath = os.path.join(dirpath, 'image',
                                       kinds + '_rfecv_3rd.jpg')
        self.fsSavepath = os.path.join(dirpath, 'image',
                                       kinds + '_rfefs_3rd.jpg')

        data1, data2, self.label = get_data(kinds)

        self.RFEtrain(data1)
        self.RFEtrain(data2)

        plt.close(1)
        plt.close(2)
Example #2
    def __init__(self, kinds):

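        # Two search grids: one for linear/rbf/sigmoid kernels, one adding 'degree' for poly.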
        self.svm_param = [
            {
                "kernel": ['linear', 'rbf', 'sigmoid'],
                'gamma': ['scale', 'auto'],
                'tol': [1e-6, 1e-5, 1e-4, 1e-3, 1e-2, 1e-1],
                'C': [
                    1e-5, 1e-4, 1e-3, 1e-2, 0.1, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
                    20, 30
                ],
            },
            {
                "kernel": ['poly'],
                'gamma': ['scale', 'auto'],
                'degree': list(range(1, 11)),
                'tol': [1e-6, 1e-5, 1e-4, 1e-3, 1e-2, 1e-1],
                'C': [
                    1e-5, 1e-4, 1e-3, 1e-2, 0.1, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
                    20, 30
                ],
            },
        ]

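        # Kernel and other settings are left to the parameter search over svm_param.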
        self.svm = SVC()

        self.savepath = os.path.join(os.getcwd(), 'image', kinds + '_svc.jpg')

        data1, data2, self.label = get_data(kinds)

        self.trainproc(data1)
        self.trainproc(data2)

        plt.close()
Example #3
def lstm(params):
    modeltype = "lstm"
    learning_rate = params["learning_rate_lstm"]
    optimizer = params["optimizer"]

    # date format must be YYYY/mm/dd
    train_end_date_ = params["train_end_date"]
    train_end_date = datetime.strptime(train_end_date_, "%Y/%m/%d")

    # forecast start day
    forecast_start_date_ = params["forecast_start_date"]
    forecast_start_date = datetime.strptime(forecast_start_date_, "%Y/%m/%d")

    print("Train End Date", train_end_date)

    # retrain the LSTM here and write its predictions to the file system
    data = datautils.get_data()

    print("Data received:", data.head())
    results, cv_info = lstmutils.do_walk_forward_validation_and_get_best_models(
        data, train_end_date, learning_rate, optimizer)

    datautils.format_and_store_cv_data(cv_info)
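    # Step back one day so rows dated on the forecast start date pass the strict '>' filter below.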
    forecast_start_date = forecast_start_date + dt.timedelta(-1)

    for result in results:

        store = result["store"]
        model = result["model"]
        scaler = result["scaler"]

        full_data_for_store = data[data.Store == store]

        print("Creating predictions for store: ", store)
        predictions = lstmutils.predict(model, scaler, full_data_for_store,
                                        forecast_start_date)

        test_ = data[(data.Store == store)
                     & (data.date_ > forecast_start_date)]

        prices = pd.Series(predictions.reshape(-1).tolist(), name="value")
        dates = test_.Date.reset_index(drop=True)

        pred_for_store_on_date = pd.concat([dates, prices], axis=1)

        datautils.write_predictions(modeltype, store, pred_for_store_on_date)

    return "Success"
Example #4
    dictfile.close()


if __name__ == "__main__":
    # exp_id = 'vgg19'
    import time
    start = time.time()
    basedir = os.path.abspath(os.path.dirname(__file__))
    """Parser of command args"""
    parse = argparse.ArgumentParser()
    parse.add_argument("--exp_id", type=str, help="exp_identifiers")
    parse.add_argument("--ptype", type=str, help="GF//NEB//NAI//WS")
    parse.add_argument("--mutants_num", type=int, help="100")
    exp_id = sys.argv[1]
    ptype = sys.argv[2]
    sample = len(get_data(exp_id)[0])
    file_name = 'model_perturbation_' + str(exp_id) + '_' + str(ptype)
    basedir = os.path.dirname(__file__)
    basedir = os.path.join(basedir, 'model')
    basedir = os.path.join(basedir, exp_id)
    if not os.path.exists(basedir):
        os.mkdir(basedir)
    file_name = os.path.join(basedir, file_name)
    kill_num_dict = {i: 0 for i in range(sample)}
    save_dict(dictionary=kill_num_dict, filename=file_name)

    model_save_path = 'perturbated_' + ptype
    model_save_path = os.path.join(basedir, model_save_path)
    file_list = [model_save_path + '/' + str(i) + '.h5' for i in range(3)]
    file_id = 0
    for file in file_list:
Example #5
import pickle
from datautils import get_data, get_model, data_proprecessing
import os
import sys

def read_kill_rate_dict(file_name):
    # Load the pickled kill-rate record and normalize it to a dict.
    with open(file_name + '.dict', 'rb') as dictfile:
        kill_rate_file = pickle.load(dictfile)
    if isinstance(kill_rate_file, dict):
        kill_rate_dict = kill_rate_file
    else:
        kill_rate_dict = {score: letter for score, letter in kill_rate_file}
    return kill_rate_dict

exp_id = sys.argv[1]
sample = len(get_data(exp_id)[0])
ptypes = sys.argv[2]

if __name__ == '__main__':

    if ptypes == 'input':
        input_types = ['gauss', 'reverse', 'black', 'white', 'shuffle']
    elif ptypes == 'model':
        input_types = ['GF','NAI','NEB','WS']
    elif ptypes == 'nlp':
        input_types = ['vs', 'vr', 'vrp']
        ptypes = 'input'
    elif ptypes == 'form':
        input_types = ['ad']
        ptypes = 'input'
        
Example #6
class FizBuzNet(nn.Module):
    """
    Two-layer network for predicting fizz or buzz.
    param: input_size -> int
    param: output_size -> int
    """
    def __init__(self, input_size, output_size):
        super(FizBuzNet, self).__init__()
        hidden_size = 100
        self.hidden = nn.Linear(input_size, hidden_size)
        self.out = nn.Linear(hidden_size, output_size)

    def forward(self, batch):
        hidden = self.hidden(batch)
        activated = torch.sigmoid(hidden)  # F.sigmoid is deprecated in modern PyTorch
        out = self.out(activated)
        return out


trX, trY, teX, teY = get_data(input_size, limit=1000)
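# Use CUDA tensor types when a GPU is available (legacy typed-tensor API).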
if torch.cuda.is_available():
    xtype = torch.cuda.FloatTensor
    ytype = torch.cuda.LongTensor
else:
    xtype = torch.FloatTensor
    ytype = torch.LongTensor
x = torch.from_numpy(trX).type(xtype)
y = torch.from_numpy(trY).type(ytype)

net = FizBuzNet(input_size, 4)
loss_fn = nn.CrossEntropyLoss()
optimizer = optim.Adam(net.parameters(), lr=lr)
total_time = []
no_of_batches = int(len(trX) / batches)
for epoch in range(epochs):
Example #7
    return a


file_name = sys.argv[1]
file = sys.argv[2]
file_id = int(float(sys.argv[3]))
exp_id = sys.argv[4]
ptype = sys.argv[5]
from keras import backend as K
basedir = os.path.dirname(__file__)
basedir = os.path.join(basedir, 'model')
basedir = os.path.join(basedir, exp_id)

predicting_file_path = os.path.join(
    basedir, 'predict_probability_vector_' + str(exp_id) + '.npy')
X_test, Y_test = get_data(exp_id)
X_test = data_proprecessing(exp_id)(X_test)
origin_model = get_model(exp_id)
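# Cache the original model's predicted probabilities so repeat runs skip re-inference.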
if not os.path.exists(predicting_file_path):
    a = origin_model.predict(X_test)
    np.save(predicting_file_path, a)
    origin_model_result = a
else:
    origin_model_result = np.load(predicting_file_path)

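# Reduce each probability vector to its predicted class index.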
origin_model_result = np.argmax(origin_model_result, axis=1)

kill_num_dict = load_dict(file_name)
result_recording_file = open(file_name + '.txt', 'a')
start = datetime.datetime.now()
my_model = keras.models.load_model(file)
Example #8
    """
    def __init__(self, input_size, output_size):
        super().__init__()
        # A simple heuristic to find the hiddenlayer size
        hidden_size = 100
        self.hidden = nn.Linear(input_size, hidden_size)
        self.out = nn.Linear(hidden_size, output_size)

    def forward(self, batch):
        hidden = self.hidden(batch)
        activated = torch.sigmoid(hidden)
        out = self.out(activated)
        return torch.sigmoid(out)


trX, trY, teX, teY = get_data(input_size)
if torch.cuda.is_available():
    dtype = torch.cuda.FloatTensor
else:
    dtype = torch.FloatTensor
x = torch.from_numpy(trX).type(dtype)
y = torch.from_numpy(trY).type(dtype)

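# Sigmoid outputs with MSELoss: targets are float 0/1 vectors rather than class indices.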
net = FizBuzNet(input_size, 4)
loss_fn = nn.MSELoss()
optimizer = optim.Adam(net.parameters(), lr=lr)
total_time = []
no_of_batches = int(len(trX) / batches)
for epoch in range(epochs):
    for batch in range(no_of_batches):
        optimizer.zero_grad()
Example #9
    part = image[0 + 2 * i:2 + 2 * i, 0 + 2 * j:2 + 2 * j].copy()
    part_r = part.reshape(-1, 1)
    np.random.shuffle(part_r)
    part_r = part_r.reshape(part.shape)
    image[0 + 2 * i:2 + 2 * i, 0 + 2 * j:2 + 2 * j] = part_r
    return image


exp_id = sys.argv[1]
perturbate_type = sys.argv[2]

if __name__ == '__main__':
    import time
    start = time.time()
    x, y = get_data(exp_id)

    # making needed directory
    basedir = os.path.dirname(__file__)

    if not os.path.exists(os.path.join(basedir, 'input')):
        os.mkdir(os.path.join(basedir, 'input'))
    basedir = os.path.join(basedir, 'input')
    basedir = os.path.join(basedir, exp_id)
    if not os.path.exists(basedir):
        os.mkdir(basedir)
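    # Create a subdirectory per input-perturbation type ('gauss' is the only one shown here).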
    if not os.path.exists(os.path.join(basedir, 'gauss')):
        os.mkdir(os.path.join(basedir, 'gauss'))