def saveCurrentPollutionData():
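  """Fetch the current pollution data from AQICN, save it as a timestamped JSON file under the configured path, and return it as a JSON response."""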
  response = aqicn.getAllPollutionData()
  save_path = config['AQICN']['SAVE_PATH']
  utils.saveData(save_path,
                 "aqicn_index_" + utils.getCurrentDayTime() + ".json",
                 json.dumps(response, sort_keys=False, indent=2))
  return jsonify(response)
Example #2
def saveToLib(crv=None, shapeName=None):
    '''Saves the shape data to a shape file in the SHAPE_LIBRARY_PATH directory'''
    crvShape = getShape(crv=crv)
    path = os.path.join(SHAPE_LIBRARY_PATH,
                        re.sub(r"\s", "", shapeName) + ".json")
    for shapeDict in crvShape:
        shapeDict.pop("colour", None)
    utils.saveData(path, crvShape)
Example #3
def exit(self):
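	"""Record a last-seen timestamp for every user in every channel, then persist the dict."""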
	try:
		for x in datapipe.channelDict:
			for user in x.users:
				user = datapipe.getUser(user).name
				datapipe.lastseenDict[user]=datetime.datetime.now()
	except IndexError:
		pass
	utils.saveData(self.lastseenDict, 'lastseen')
Example #4
def main():
    #load the JPEG images or the npz file

    #data, labels = load_images() #un-comment if want to load JPEG images into numpy
    #saveData('data/data.npz', data, labels) #un-comment if want to save loaded JPEG images as npz for faster loading later
    data, labels = loadData(
        'data/data.npz'
    )  #load images from npz, much faster than loading from JPEGs each time

    #split dataset into train, val, and test: first 70/30, then split the held-out 30% in half for 70/15/15
    print('Splitting data into 70/15/15 train, val, and test sets.')
    train, testval, train_labels, testval_labels = train_test_split(
        data,
        labels,
        test_size=0.30,
        random_state=42,
        shuffle=True,
        stratify=labels)
    test, val, test_labels, val_labels = train_test_split(
        testval,
        testval_labels,
        test_size=0.50,
        random_state=42,
        shuffle=True,
        stratify=testval_labels)
    del data, labels, testval, testval_labels  #free up memory

    #check imbalance
    print('Train Shape:', train.shape, 'Train Labels Shape:',
          train_labels.shape)
    print('Validation Shape:', val.shape, 'Validation Labels Shape:',
          val_labels.shape)
    print('Test Shape:', test.shape, 'Test Labels Shape:', test_labels.shape)
    plotClassDist(train_labels, 'Train Class Distribution')
    plotClassDist(val_labels, 'Validation Class Distribution')
    plotClassDist(test_labels, 'Test Class Distribution')

    #random oversample train set and recheck balance
    ovs_data, ovs_labels = randomOversample(train, train_labels)
    print('OVS Data Shape:', ovs_data.shape, 'OVS Labels Shape:',
          ovs_labels.shape)
    plotClassDist(ovs_labels, 'Train Class Distribution (ROS)')

    #oversample using smote on train set and recheck balance
    smt_data, smt_labels = smoteOversample(train, train_labels)
    print('SMT Data Shape:', smt_data.shape, 'SMT Labels Shape:',
          smt_labels.shape)
    plotClassDist(smt_labels, 'Train Class Distribution (SMOTE)')

    #save untouched and oversampled data as npz along with val and test
    saveData('data/train.npz', train, train_labels)
    saveData('data/ros_data.npz', ovs_data, ovs_labels)
    saveData('data/smt_data.npz', smt_data, smt_labels)
    saveData('data/val.npz', val, val_labels)
    saveData('data/test.npz', test, test_labels)
Example #5
def main():
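    # Paths to the pre-processed corpus, its word-frequency dictionary, and pretrained GloVe vectors.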
    path2data = "data/post-process/data.pkl"
    path2wordFreqDict = "data/post-process/wordfreq.pkl"
    path2pretrainWordEmbedding = "data/word-embedding/glove.vtb.100d.txt"
    devRatio = 0.1

    path2dirOut = "data/nlm-input/"

    data = utils.loadPKLFile(path2data)
    wordFreqDict = utils.loadPKLFile(path2wordFreqDict)

    vocab, invertVocab = buildVocabIndex(wordFreqDict)
    print("Vocabulary Size:", len(vocab) + 1)

    input, inputLen, output = makeNeuralLMDataFormat(data, vocab)

    input = convertList2NumpyArray(input)
    inputLen = convertList2NumpyArray(inputLen)
    output = convertList2NumpyArray(output)

    # Split
    input, inputLen, output = utils.unisonShuffleNumpyArrays(
        input, inputLen, output)

    totalIns = input.shape[0]

    nbDev = int(totalIns * devRatio)

    train = (input[:-nbDev], inputLen[:-nbDev], output[:-nbDev])
    dev = (input[-nbDev:], inputLen[-nbDev:], output[-nbDev:])

    # Word Embedding
    pretrainWordEmbedding = utils.loadPretrainWordEmbedding(
        path2pretrainWordEmbedding)
    wordEmbeddingMatrix = makeWordEmbeddingMatrix(pretrainWordEmbedding, vocab,
                                                  100)

    utils.saveData(path2dirOut, 'train', wordEmbeddingMatrix, *train)
    utils.saveData(path2dirOut, 'dev', None, *dev)

    utils.savePKLFile(os.path.join(path2dirOut, 'vocab.pkl'), vocab)
    utils.savePKLFile(os.path.join(path2dirOut, 'invert-vocab.pkl'),
                      invertVocab)
    print('Finished!')
Example #6
def oninfo(ev,server,plugin):
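  # Dispatch !!loc chat subcommands (help, save, add, del, conv, ex, lookup) for the stored locations.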
  if ev["sender"] != False and ev["content"].startswith('!!loc'):
    if re.match(r"^!!loc help$", ev["content"]):
      printhelp(server)
    elif re.match(r"^!!loc save$", ev["content"]):
      global locs
      utils.saveData('location', locs)
      server.tell(ev["sender"], CC('[LOC] ','b'), CC('保存完毕', 'e'))  # "Saved"
    elif re.match(r"^!!loc add \S+ -?\d+ -?\d+ -?\d+ (-1|0|1)$", ev["content"]):
      add(server, ev)
    elif re.match(r"^!!loc add \S+ here$", ev["content"]):
      addHere(server, ev)
    elif re.match(r"^!!loc del \S+$", ev["content"]):
      delete(server, ev)
    elif re.match(r"^!!loc conv \S+$", ev["content"]):
      conv(server, ev)
    elif re.match(r"^!!loc ex$", ev["content"]):
      getAllVoxel(server, ev)
    elif re.match(r"^!!loc \S+$", ev["content"]):
      get(server, ev)
    elif re.match(r"^!!loc$", ev["content"]):
      getAll(server, ev)
    else:
      server.tell(ev["sender"], CC('[LOC] ','b'), CC('输入无效,使用 !!loc help 查看帮助', 'c'))  # "Invalid input; use !!loc help for help"
Example #7
def onunload(ev,sender,plugin):
  if ev["name"] == name:
    global locs
    utils.saveData('location', locs)
Example #8
                elif 8 == sampRateMode:
                    imfFileExt = imfFileExt + '08'
                else:
                    print('Invalid Sampling Rate Mode:', sampRateMode)
                    sys.exit()
                savefilename = dataSaveFolder + '/' + filename.replace(
                    '.wav', imfFileExt)
                # create the folder if it does not exist
                if not os.path.exists(os.path.dirname(savefilename)):
                    os.makedirs(os.path.dirname(savefilename))

                inputShape = np.shape(imfFeatures)
                # print(inputShape)
                if inputShape[1:] == (4, 6, numberOfImfs):
                    # save imf feature input file
                    utils.saveData(imfFeatures, savefilename)
                    print('Prepared: ' + savefilename, inputShape)
                    if 48 == sampRateMode:
                        # save spectrogram input file
                        inputShape = np.shape(spectos)
                        # print(inputShape)
                        savefilename = dataSaveFolder + '/' + filename.replace(
                            '.wav', '.spct48')
                        utils.saveData(spectos, savefilename)
                        print('Prepared: ' + savefilename, inputShape)
                    elif 8 == sampRateMode:
                        pass
                    else:
                        print('Invalid Sampling Rate Mode:', sampRateMode)
                        sys.exit()
                else:
Example #9
def onunload(ev, server, plugin):
    if ev["name"] == name:
        utils.saveData('gamemode', gm)
Example #10
def saveResult(self):
    utils.saveData(self.RESULT, self.OUTPUT_FILE)
    self.LOG("Results saved to: " + self.OUTPUT_FILE)
Example #11
def test(args):
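    # Run the trained RCAN super-resolution model over the test set and save the outputs as BMP images.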
    my_model = RCAN(args)  # model.RDN()

    save = saveData(args)
    dataloader = get_dataset(args)
    my_model.cuda()
    # my_model.eval()
    model_path = os.path.join(args.model_savepath, args.model_name)
    # my_model.load_state_dict(torch.load(model_path))
    my_model = save.load_model(my_model, model_path)
    for i, (lr_in, name) in enumerate(dataloader):
        # _,_,w,h =lr_in.shape
        # out_img = torch.ze  # tile the image
        # lr_in_ = lr_in.numpy()
        _, _, w, h = lr_in.shape
        # print(_,_,w,h)
        # out_img = np.zeros((1, 3, w, h))
        # in_img1 = np.zeros((1, 3, int(w / 3), int(h / 2)))
        # in_img2 = np.zeros((1, 3, int(w / 2), int(h / 2)))
        # for i in range(5):
        #     img_hr_out=np.zeros((3,w*4,h*4))
        #     for j in range(10):
        # img_hr_out=np.zeros((3,w*4,h*4))
        # z=0
        # in_img1 = np.zeros((6,3, 90,60))
        # for i_w in range(3):
        #     for i_h in range(2):
        #         in_img1[z,:,:,:] =lr_in[0,:,(i_w)*(int(w /3)):(i_w+1)*(int(w /3)),(i_h)*(int(h /8)):(i_h+1)*(int(h /8))]
        #         z=z+1
        # in_img1
        # in_img2 = lr_in[:, :, 0:w, int(h / 2):]
        # in_img1=torch.from_numpy(in_img1)
        # in_img2 = torch.from_numpy(in_img2)
        in_img1 = lr_in.cuda().float()
        in_img1 = my_model(in_img1)
        in_img1 = in_img1[0]
        img_hr_out = in_img1.cpu().data.numpy()
        # z=0
        # for i_w in range(3):
        #     for i_h in range(2):
        #         img_hr_out[:,(i_w)*(int(w /3))*4:(i_w+1)*(int(w /3))*4,4*(i_h)*(int(h /8)):4*(i_h+1)*(int(h /8))] = in_img1[z,:,:,:]
        #         z=z+1
        img_hr_out = img_hr_out.transpose((1, 2, 0))
        img_hr_out = np.ceil(img_hr_out * 256)

        img_hr_out[img_hr_out > 255] = 255
        img_hr_out[img_hr_out < 0] = 0
        # img_hr_out1 = np.zeros((1080,1920,3))

        out_i = i // 100
        i_i = i % 100
        # img_hr_out1 =cv2.imread(args.result_SR_Dir + "/sr/"+str(out_i) + '/%05d_sr.bmp' % (i_i))
        # stitch the tiles back together
        # img_hr_out1[360:2*360,0:960,:]=img_hr_out

        if not os.path.exists(args.result_SR_Dir + "/sr/" + str(out_i)):
            os.mkdir(args.result_SR_Dir + "/sr/" + str(out_i))
        cv2.imwrite(
            args.result_SR_Dir + "/sr/" + str(out_i) + '/%05d_sr.bmp' % (i_i),
            img_hr_out)
Example #12
        foldername = os.path.basename(folderpath).split('_')
        foldername[0] = 'inputs'
        dataSaveFolder = os.path.dirname(folderpath)+'/'+'_'.join(foldername)
        inputFile = []
        inputFile2 = []
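        # Swap to frame-major order so each frame's instantaneous frequencies pair with its magnitudes.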
        swappedInstfArr = np.array(instfArr).swapaxes(0, 1)
        swappedMagArr = np.array(mags).swapaxes(0, 1)[:-1]
        print(swappedInstfArr.shape, swappedMagArr.shape)
        for i in range(len(swappedInstfArr)):
            inputFile.append({'instantFrequencies': swappedInstfArr[i], 'magnitudes': swappedMagArr[i]})
            inputFile2.append([swappedInstfArr[i], swappedMagArr[i]])
        savefilename = dataSaveFolder+'/'+filename+'.inp'
        if not os.path.exists(os.path.dirname(savefilename)):
            os.makedirs(os.path.dirname(savefilename))
        print(np.shape(inputFile))
        utils.saveData(inputFile, savefilename)
        utils.saveData(np.array(inputFile2), savefilename+'2')
        print(filename+'.inp prepared.')
    print('----------------------')

    print('plotting...')

    if plotSpec:
        specsAspectRatio = specs.shape[0] / specs.shape[1]  # for a better figure size and look
        # Make plot with vertical (default) colorbar
        fig, ax = plt.subplots(figsize=(15 * specsAspectRatio, 15))

        data = np.swapaxes(specs, 0, 1)
        data = np.ma.masked_values(data, 0)

        cmap = plt.cm.magma
Example #13
def saveCache(self):
    utils.saveData(self.RESULT, self.CACHE_FILE)
Example #14
t0 = time.time()
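# Online training: one update per image, repeated for each epoch.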
for i in range(epochs):
    print("Epoch", i)
    for img, lbl, idx in zip(imgs, binLbls, range(NTRAIN)):
        train(img, lbl)
t1 = time.time()

predictedClasses = predictMatrix(imgsTest)
nErrorTest = NTEST - np.sum(predictedClasses == lblsTest)
print("Test errors:", nErrorTest, "%:", nErrorTest / NTEST * 100.0)

print("Training time:", (t1 - t0))
np.savez("weights/" + "backSoft.npz",
         W1=W1.get_value(),
         b1=b1.get_value(),
         W2=W2.get_value(),
         b2=b2.get_value())

nErrorTrain = NTRAIN - np.sum(predictMatrix(imgs) == lbls)
print("Train errors:", nErrorTrain, "%:", nErrorTrain / NTRAIN * 100)

algDescription = "Backpropagation algorithm on a NN with one hidden layer of\n"
algDescription += str(
    nHidden
) + " neurons using LeCun's tanh activation function, and a softmax\n"
algDescription += "output layer, with a learning rate of " + str(
    lr) + "\nand making "
algDescription += str(epochs) + " passes over the training set."
saveData(algDescription, predictMatrix(imgs), predictMatrix(imgsTest), t1 - t0,
         "b1withLeCunActFun.txt")
Example #15
def onunload(ev, server, plugin):
    if ev["name"] == name:
        utils.saveData("backup", data)
Example #16
def getdata(self, code):
    data = utils.doParse(code)
    utils.saveData(db, data)
    return simplejson.dumps(data, cls=utils.MyJSONEncoder)
Example #17
def magic(model_name,
          opt,
          lrs,
          fc_layers=(1024, 1024),
          batch_size=64,
          dropout=0.5,
          MAX_EPOCHS=100,
          dry_run=False,
          lr=0.0001,
          acf='relu',
          pred_acf='softmax',
          model_from_file=False,
          model_json="",
          model_w_file=""):
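    """Build (or load) a Keras classifier, train it on augmented image data
    generators, and save the final weights, plots, and training history."""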
    #K.variable(lr, name='lr')
    opt_name = opt[0]
    opt = opt[1]()
    lrs_name = lrs[0]
    #model_name_save = f"{model_name}_detect_FULL__OPT_{opt_name}___LRS_{lrs_name}_final_1024"
    model_name_save = f"{model_name}_detect_FULL__OPT_{opt_name}___LRS_{lrs_name}_final_4096"

    print("Training for " + model_name_save)
    if dry_run:
        return
    """ big data set    
    TRAIN_DIR = "data/Train/flower_splits/train"
    VAL_DIR = "data/Train/flower_splits/val"
    TEST_DIR = "data/Train/flower_splits/test"
    """
    TRAIN_DIR = "data/Train/detect_train"
    VAL_DIR = "data/Validation/detect_train"
    #TEST_DIR = "data/TrainBig/flower/test"

    img_width = 224
    img_height = 224
    img_channels = 3

    #datagen = ImageDataGenerator(rescale=1./255)
    train_datagen = ImageDataGenerator(
        rescale=1. / 256,
        horizontal_flip=True,
        vertical_flip=True,
        # shear_range=,
        zoom_range=0.05,
        width_shift_range=0.05,
        height_shift_range=0.05,
        rotation_range=15)

    val_datagen = ImageDataGenerator(rescale=1. / 256)
    #datagen = Augmentator._get_ImageDataGenerator()

    train_gen = train_datagen.flow_from_directory(directory=TRAIN_DIR,
                                                  target_size=(img_width,
                                                               img_height),
                                                  color_mode='rgb',
                                                  batch_size=batch_size,
                                                  class_mode='categorical',
                                                  shuffle=True,
                                                  seed=seed)

    val_gen = val_datagen.flow_from_directory(directory=VAL_DIR,
                                              target_size=(img_width,
                                                           img_height),
                                              color_mode='rgb',
                                              batch_size=batch_size,
                                              class_mode='categorical',
                                              shuffle=True,
                                              seed=seed)

    # train_gen.n gets number of samples. // => floor division
    TRAIN_STEP_SIZE = train_gen.n // batch_size
    VAL_STEP_SIZE = val_gen.n // batch_size

    # set up model
    # dataset as input - used to extract number of classes and inputsize
    if model_from_file:
        model = get_model_from(json_file_name=model_json,
                               h5_file_name=model_w_file,
                               acf=acf,
                               pred_acf=pred_acf)
    else:
        model = get_model(model_name,
                          dataset=train_gen,
                          dropout=dropout,
                          fc_layers=fc_layers,
                          acf=acf,
                          pred_acf=pred_acf)
    model_json = model.to_json()
    with open(model_name_save + ".json", "w") as json_file:
        json_file.write(model_json)

    model.compile(optimizer=opt,
                  loss="categorical_crossentropy",
                  metrics=['accuracy'])
    #K.set_value(model.optimizer.lr, lr)
    lrs_obj = lrs[1]()
    callbacks_list = get_callbacks(model_name_save)
    callbacks_list.append(lrs_obj)

    fit_history = model.fit_generator(train_gen,
                                      steps_per_epoch=TRAIN_STEP_SIZE,
                                      validation_data=val_gen,
                                      validation_steps=VAL_STEP_SIZE,
                                      epochs=MAX_EPOCHS,
                                      callbacks=callbacks_list,
                                      verbose=verbose)

    print('Saving final weights...')
    model.save_weights("dl_weights/" + model_name_save + timestamp +
                       "final.h5",
                       overwrite=True)

    # ----------- CREATING PLOTS AND SAVING DATA -----------
    DIR_TO_SAVE_IN = "results/plots/" + model_name + '/' + timestamp + '/'
    if not os.path.exists(DIR_TO_SAVE_IN):
        os.makedirs(DIR_TO_SAVE_IN)
    utils.createPlot(fit_history.history['acc'],
                     modelName=model_name_save,
                     unit="Accuracy",
                     title="Model accuracy for " + model_name,
                     ylabel="Accuracy",
                     xlabel="Epochs",
                     folder=model_name + '/' + timestamp)
    utils.createPlot(fit_history.history['loss'],
                     modelName=model_name_save,
                     unit="Loss",
                     title="Model loss for " + model_name,
                     ylabel="Loss",
                     xlabel="Epochs",
                     folder=model_name + '/' + timestamp)
    utils.createPlot(fit_history.history['lr'],
                     modelName=model_name_save,
                     unit="LearnRate",
                     title="Model Learning Rate for " + model_name,
                     ylabel="Learning Rate",
                     xlabel="Epochs",
                     folder=model_name + '/' + timestamp)

    utils.saveData(fit_history.history['acc'],
                   filepath=DIR_TO_SAVE_IN + "Accuracy.P")
    utils.saveData(fit_history.history['loss'],
                   filepath=DIR_TO_SAVE_IN + "Loss.P")
    utils.saveData(fit_history.history['lr'],
                   filepath=DIR_TO_SAVE_IN + "LearnRate.P")
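    # Drop references so the backend can free model and generator memory between runs.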
    lrs_obj = None
    opt = None
    model = None
    val_gen = None
    train_gen = None
    val_datagen = None
    train_datagen = None

    print('Done...')
Example #18
def processResult(result):
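    # Deserialize the response according to its Content-Type header, then persist it.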
    format = result.headers['Content-Type']
    deviceBody = utils.deserialize(result, format)
    utils.saveData(deviceBody, format)
t1 = time.time()

predictedClasses = predictMatrix(imgsTest)
nErrorTest = NTEST - np.sum(predictedClasses == lblsTest)
print("Test errors:", nErrorTest, "%:", nErrorTest / NTEST * 100.0)

print("Training time:", (t1 - t0))
fileName = "backSoftwithMomentumsLR" + str(lr) + "EPOCH" + str(
    epochs) + "NHID" + str(nHidden) + "ETA" + str(eta) + ".npz"
np.savez("weights/" + fileName,
         W1=W1.get_value(),
         b1=b1.get_value(),
         W2=W2.get_value(),
         b2=b2.get_value())

nErrorTrain = NTRAIN - np.sum(predictMatrix(imgs) == lbls)
print("Train errors:", nErrorTrain, "%:", nErrorTrain / NTRAIN * 100)

algDescription = "Backpropagation algorithm on a NN with one hidden layer of\n"
algDescription += str(
    nHidden) + " neurons using a sigmoid activation function, and a softmax\n"
algDescription += "output layer, with a learning rate of " + str(
    lr) + "\nand making "
algDescription += str(
    epochs
) + " passes over the training set, also using momentum with a rate of " + str(eta)
saveData(
    algDescription, predictMatrix(imgs), predictMatrix(imgsTest), t1 - t0,
    "b1withMomentumsLR" + str(lr) + "EPOCH" + str(epochs) + "NHID" +
    str(nHidden) + "ETA" + str(eta) + ".txt")
Example #20
def unloading(ev, server, plugin):
    global cfg
    if ev["name"] == name:
        utils.saveData("uuid", cfg)
Example #21
def onunload(ev, server, plugin):
    if ev["name"] == name:
        utils.saveData("bot", {"bot": botlist, "loc": loclist, "log": botlog})