Code Example #1
def createPreDatasetFromSlices(songname):
    data = []
    filenames = os.listdir("single/dan/")
    filenames = [filename for filename in filenames if filename.endswith('.png')]
    #Randomize file selection for this genre
    shuffle(filenames)

    #Add data (X,y)
    for filename in filenames:
        imgData = getImageData("single/dan/"+"/"+filename, sliceSize)
        label = [0,0,0,0,0,0,0]
        data.append((imgData,label))


    #Shuffle data
    shuffle(data)

    X,y = zip(*data)

    #Prepare for Tflearn at the same time
    train_X = np.array(X[:]).reshape([-1, sliceSize, sliceSize, 1])

    
    print("    DataPreset created! ✅")
        
    #Save
    saveDataPreset(train_X,songname)
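
The getImageData helper called throughout these listings is never shown. Below is a minimal hypothetical sketch of what such a helper commonly does (grayscale load, resize to the slice size, scale to [0, 1]); the exact signature varies between the projects above (Code Example #4 passes three size arguments), so treat it as an illustration only.

import numpy as np
from PIL import Image

def getImageData(filename, imageSize):
    # Hypothetical sketch: load a spectrogram slice as a normalized grayscale array.
    img = Image.open(filename)
    img = img.convert("L")  # force single-channel grayscale
    img = img.resize((imageSize, imageSize), resample=Image.BILINEAR)
    imgData = np.asarray(img, dtype=np.uint8).reshape(imageSize, imageSize)
    return imgData / 255.0  # scale pixel values to [0, 1]
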
Code Example #2
def calculate(filename):
    path = os.path.join(os.getcwd(), 'Generate/*')
    print(path)
    files = glob(path)
    for file in files:
        os.remove(file)
    model = createModel(10, sliceSize)
    model.load('model/musicDNN_4.0_60epoch.tflearn')
    print("this is the step before mp3topng")
    mp3topng(filename)
    data = []
    ct = 0
    path = "Generate/"
    for file in os.listdir(path):
        if file.endswith(".png"):
            imgdata = getImageData(path + file, sliceSize)
            data.append(imgdata)
            ct += 1
    pred = model.predict(data)
    add = [0 for i in range(10)]
    for i in range(ct):
        tt = getmax(pred[i])
        #print("max={}".format(tt))
        add[tt] += 1
    print(add)
    return getmax(add)
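
Code Examples #2 and #6 also rely on a getmax helper that is not included. A plausible sketch, assuming it simply returns the index of the largest value (the predicted class or the winning vote), is:

def getmax(values):
    # Hypothetical sketch: index of the largest entry in a prediction vector or vote tally.
    best = 0
    for i, v in enumerate(values):
        if v > values[best]:
            best = i
    return best
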
Code Example #3
def createDatasetFromSlices(nbPerGenre, genres, sliceSize, validationRatio,
                            testRatio):
    data = []
    for genre in genres:
        print("-> Adding {}...".format(genre))
        #Get slices in genre subfolder
        filenames = os.listdir(slicesPath + genre)
        filenames = [
            filename for filename in filenames if filename.endswith('0.png')
        ]
        filenames = filenames[:nbPerGenre]
        #Randomize file selection for this genre
        shuffle(filenames)

        #Add data (X,y)
        for filename in filenames:
            imgData = getImageData(slicesPath + genre + "/" + filename,
                                   sliceSize)
            label = [1. if genre == g else 0. for g in genres]
            musicid = filename.split('_')[1]
            data.append((imgData, label, musicid))

    #Shuffle data
    shuffle(data)

    #Extract X and y
    X, y, z = zip(*data)

    #Split data
    validationNb = int(len(X) * validationRatio)
    testNb = int(len(X) * testRatio)
    trainNb = len(X) - (validationNb + testNb)

    #Prepare for Tflearn at the same time
    train_X = np.array(X[:trainNb]).reshape(
        [-1, sliceSize, sliceSize * timeratio, 1])
    train_y = np.array(y[:trainNb])
    train_z = np.array(z[:trainNb])
    validation_X = np.array(X[trainNb:trainNb + validationNb]).reshape(
        [-1, sliceSize, sliceSize * timeratio, 1])
    validation_y = np.array(y[trainNb:trainNb + validationNb])
    validation_z = np.array(z[trainNb:trainNb + validationNb])
    test_X = np.array(X[-testNb:]).reshape(
        [-1, sliceSize, sliceSize * timeratio, 1])
    test_y = np.array(y[-testNb:])
    test_z = np.array(z[-testNb:])
    print("    Dataset created! ")

    #Save
    saveDataset(train_X, train_y, train_z, validation_X, validation_y,
                validation_z, test_X, test_y, test_z, nbPerGenre, genres,
                sliceSize)

    return train_X, train_y, train_z, validation_X, validation_y, validation_z, test_X, test_y, test_z
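
saveDataset is called with the three splits but never defined in these listings. A minimal sketch assuming pickle-based persistence (the datasetPath default and the file-name scheme are assumptions) could look like this:

import os
import pickle

def saveDataset(train_X, train_y, train_z, validation_X, validation_y, validation_z,
                test_X, test_y, test_z, nbPerGenre, genres, sliceSize,
                datasetPath="Dataset/"):
    # Hypothetical sketch: dump each split to its own pickle file under datasetPath.
    os.makedirs(datasetPath, exist_ok=True)
    name = "{}_{}_{}".format(nbPerGenre, len(genres), sliceSize)
    splits = {
        "train": (train_X, train_y, train_z),
        "validation": (validation_X, validation_y, validation_z),
        "test": (test_X, test_y, test_z),
    }
    for split, arrays in splits.items():
        with open(os.path.join(datasetPath, "{}_{}.p".format(split, name)), "wb") as f:
            pickle.dump(arrays, f, protocol=pickle.HIGHEST_PROTOCOL)
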
Code Example #4
def addDataArraysToDataset(trainingFilenames, validationFilenames, testFilenames, \
  trainingData, validationData, testingData, genre, genres):
  '''Take arrays of file names and put them into correct section of dataset'''
  # Add data (X,Y)
  for validationFilename in validationFilenames:
    imgData = getImageData(slicesPath + genre + "/" + validationFilename, sliceXSize, sliceYSize, sliceZSize)
    label = [1. if genre == g else 0. for g in genres]
    validationData.append((imgData, label))

  for testFilename in testFilenames:
    imgData = getImageData(slicesPath + genre + "/" + testFilename, sliceXSize, sliceYSize, sliceZSize)
    label = [1. if genre == g else 0. for g in genres]
    # add test file name for voting system later
    songNameWithoutSlices = testFilename.replace('.png', '')[:(testFilename.rfind('_'))]
    testingData.append((imgData, label, songNameWithoutSlices))

  for trainingFilename in trainingFilenames:
    imgData = getImageData(slicesPath + genre + "/" + trainingFilename, sliceXSize, sliceYSize, sliceZSize)
    label = [1. if genre == g else 0. for g in genres]
    trainingData.append((imgData, label))
Code Example #5
def getData(origFilename):
    data = []
    print("Classifing: " + origFilename)
    filenames = os.listdir(identifyPath + slicesPath + origFilename + "/")
    filenames = [
        filename for filename in filenames if filename.endswith('.png')
    ]
    for filename in filenames:
        imgData = getImageData(
            identifyPath + slicesPath + origFilename + "/" + filename,
            sliceSize)
        data.append(imgData)
    return data
Code Example #6
File: try.py Project: gaodechen/SWC_FINAL
def calculate(filename):
	mp3topng(filename)
	data = []
	ct = 0
	path = "Generate/"
	for file in os.listdir(path):
		if file.endswith(".png"):
			imgdata = getImageData(path+file, sliceSize)
			data.append(imgdata)
			ct += 1
	pred = model.predict(data)
	add = [0 for i in range(10)]
	for i in range(ct):
		tt = getmax(pred[i])
		#print("max={}".format(tt))
		add[tt] += 1
	print(add)
	return getmax(add) 
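
mp3topng, called by Code Examples #2 and #6 before the slices are read from Generate/, is not shown either. The sketch below is hypothetical; it mirrors the sox spectrogram command and the slicing loop that appear later in Code Examples #13 and #16, and assumes a sox build with MP3 support.

import os
import subprocess
from PIL import Image

def mp3topng(filename, sliceSize=128, pixelPerSecond=50, outPath="Generate/"):
    # Hypothetical sketch: render a monochrome spectrogram with sox, then cut it
    # into square sliceSize x sliceSize tiles under outPath.
    os.makedirs(outPath, exist_ok=True)
    base = os.path.splitext(os.path.basename(filename))[0]
    spectrogram = os.path.join(outPath, base + ".png")
    subprocess.run(["sox", filename, "-n", "spectrogram", "-Y", "200",
                    "-X", str(pixelPerSecond), "-m", "-r", "-o", spectrogram],
                   check=True)
    img = Image.open(spectrogram)
    width, _ = img.size
    for i in range(width // sliceSize):
        start = i * sliceSize
        tile = img.crop((start, 1, start + sliceSize, sliceSize + 1))
        tile.save(os.path.join(outPath, "{}_{}.png".format(base, i)))
    os.remove(spectrogram)  # keep only the slices that calculate() will read
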
Code Example #7
def createPreDatasetFromSlices(songname):
    data = []
    # for genre in genres:
    #     #print("-> Adding {}...".format(genre))
    #     #Get slices in genre subfolder
    filenames = os.listdir(slicesPrePath + "Pingpu")
    filenames = [
        filename for filename in filenames if filename.endswith('.png')
    ]
    #Randomize file selection for this genre
    shuffle(filenames)

    #Add data (X,y)
    for filename in filenames:
        imgData = getImageData(slicesPrePath + "Pingpu" + "/" + filename,
                               sliceSize)
        label = [0, 0, 0, 0, 0, 0, 0]
        data.append((imgData, label))

    #Shuffle data
    shuffle(data)
    #command = "rm -rf /home/uaqual/Data/SlicesPath/Pingpu/*"
    #print(command)
    #p = Popen(command, shell=True, stdin=PIPE, stdout=PIPE, stderr=STDOUT, close_fds=True, cwd=currentPath)
    #output, errors = p.communicate()
    #if errors:
    #    print (errors)  # delete the Pingpu temporary folder

    X, y = zip(*data)

    #Prepare for Tflearn at the same time
    train_X = np.array(X[:]).reshape([-1, sliceSize, sliceSize, 1])
    train_y = np.array(y[:])

    #print("    DataPreset created! ✅")

    #Save
    saveDataPreset(train_X, train_y, songname)

    return train_X, train_y
Code Example #8
def createDatasetFromSlices(unlabeledFileNames, sliceSize):
    data = []
    # find files in folder
    print("-> Adding {}...".format(unlabeledFileNames))

    #Get slices in unlabeledFileNames subfolder
    for filename in unlabeledFileNames:
        filenames = os.listdir(newSlicesPath + filename)
        filenames = [
            pngFileName for pngFileName in filenames
            if pngFileName.endswith('.png')
        ]
        # get image data and append to data
        for pngFileName in filenames:
            imgData = getImageData(
                newSlicesPath + filename + "/" + pngFileName, sliceSize)
            data.append(imgData)
    print("    Dataset created! ✅")
    #Save
    saveDataset(data)

    return data
Code Example #9
def createDatasetFromInputSlices(nbPerGenre, genres, sliceSize,
                                 validationRatio, testRatio):
    data = []
    genre = "Test"
    print("-> Adding {}...".format(genre))
    # Get slices in genre subfolder
    filenames = os.listdir(slicesInputPath + genre)
    filenames = [
        filename for filename in filenames if filename.endswith('.png')
    ]
    filenames = filenames[:nbPerGenre]
    # Randomize file selection for this genre
    shuffle(filenames)

    # Add data (X,y)
    for filename in filenames:
        imgData = getImageData(slicesInputPath + genre + "/" + filename,
                               sliceSize)
        label = 1.
        data.append((imgData, label))

    # Shuffle data
    shuffle(data)

    # Extract X and y
    X, y = zip(*data)

    # Split data
    trainNb = len(X)

    # Prepare for Tflearn at the same time
    train_X = np.array(X[:trainNb]).reshape([-1, sliceSize, sliceSize, 1])
    train_y = np.array(y[:trainNb])
    print("    Dataset created! ✅")

    # Save
    saveInputDataset(train_X, train_y, nbPerGenre, sliceSize)

    return train_X, train_y
Code Example #10
def createDatasetFromSlices(rootpath, nbPerGenre, genres, sliceSize,
                            validationRatio, testRatio):
    data = []
    for genre in genres:
        print("-> Adding {}...".format(genre))
        #Get slices in genre subfolder
        filenames = os.listdir(rootpath + slicesPath + genre)
        filenames = [
            filename for filename in filenames if filename.endswith('.png')
        ]
        filenames = filenames[:nbPerGenre]
        #Randomize file selection for this genre
        shuffle(filenames)

        #Add data (X,y)
        for filename in filenames:
            imgData = getImageData(
                rootpath + slicesPath + genre + "/" + filename, sliceSize)
            label = [1. if genre == g else 0. for g in genres]
            data.append((imgData, label))

    #Shuffle data
    shuffle(data)

    #Extract X and y
    X, y = zip(*data)

    #Split data
    validationNb = int(len(X) * validationRatio)
    testNb = int(len(X) * testRatio)
    trainNb = len(X) - (validationNb + testNb)

    #Prepare for Tflearn at the same time
    train_X = np.array(X[:trainNb]).reshape([-1, sliceSize, sliceSize, 1])

    return train_X
Code Example #11
                                mode="test")

    #Load weights
    print("[+] Loading weights...")
    model.load('musicDNN.tflearn')
    print("    Weights loaded! ✅")

    testAccuracy = model.evaluate(test_X, test_y)[0]
    print("[+] Test accuracy: {} ".format(testAccuracy))

if "predict" in args.mode:

    model.load('musicDNN.tflearn')
    data = []

    filenames = os.listdir(testPath)
    filenames = [
        filename for filename in filenames if filename.endswith('.png')
    ]

    # Add data (X,y)
    for filename in filenames:
        imgData = getImageData(testPath + filename, sliceSize)
        data.append(imgData)

    predictionSoftmax = model.predict(data)[0]
    predictedIndex = max(enumerate(predictionSoftmax), key=lambda x: x[1])[0]
    print("Prediction:", ["{0:.2f}".format(x) for x in predictionSoftmax],
          "->", predictedIndex)
    print("The genre is: %s" % genres[predictedIndex])
Code Example #12
                print("Creating slice: ", (i + 1), "/", nbSamples, "for",
                      newFilename)
                #Extract and save 128x128 sample
                startPixel = i * desiredSize
                imgTmp = img.crop(
                    (startPixel, 1, startPixel + desiredSize, desiredSize + 1))
                imgTmp.save(slicePath +
                            "new/{}_{}.png".format(newFilename[:-4], i))

    #load each image slice with given prefix with getImageData method
    data = []
    spect_files = glob.glob(slicePath + 'new/*.png')  # slices were written under slicePath + "new/" above
    #append each loaded image slice to a data-array

    for file in spect_files:
        img_array = getImageData(file, sliceSize)
        print(file)
        data.append(img_array)
        os.remove(file)  #remove slices

    os.remove(newSpecPath + newFilename)  #remove spectrogram
    print('Analyzing file...')

    #load model and get predictions
    model.load('musicDNN.tflearn')
    predictionSoftmax = model.predict(data)[0]
    predictedIndex = max(enumerate(predictionSoftmax), key=lambda x: x[1])[0]
    print(predictionSoftmax, '\n')

    print("Prediction:", ["{0:.2f}".format(x) for x in predictionSoftmax],
          "->", predictedIndex)
Code Example #13
File: classify.py Project: aymanzay/Macaron
def classify():
    #List genres
    genres = os.listdir(slicesPath)
    genres = [filename for filename in genres if os.path.isdir(slicesPath+filename)]
    nbClasses = len(genres)

    #Create model
    model = createModel(nbClasses, sliceSize)
    #s = song()
    #lib = library()

    #create spectrogram path if it doesn't exist
    if not os.path.exists(os.path.dirname(predictSpect)):
        try:
            os.makedirs(os.path.dirname(predictSpect))
        except OSError as exc: # Guard against race condition
            if exc.errno != errno.EEXIST:
                raise

    #create slice path if it doesn't exist
    if not os.path.exists(os.path.dirname(predSlicePath)):
        try:
            os.makedirs(os.path.dirname(predSlicePath))
        except OSError as exc: # Guard against race condition
            if exc.errno != errno.EEXIST:
                raise

    counter = 0

    #print ("Creating Spectrogams")
    #parse through all files in library and create spectrograms
    for filename in os.listdir(predictionPath):
        if filename.endswith(".mp3"):
            newFilename = 'new_' + filename
            newFilename = newFilename.replace(".mp3", "")

            if (Path(predictSpect+newFilename)).exists():
                break
            
            if(isMono(predictionPath+filename)):
                command = "cp '{}' '/tmp/{}.wav' remix 1,2".format(predictionPath+filename, newFilename)
            else:
                command = "sox '{}' '/tmp/{}.wav' remix 1,2".format(predictionPath+filename,newFilename)
            p = Popen(command, shell=True, stdin=PIPE, stdout=PIPE, stderr=STDOUT, close_fds=True, cwd=currentPath)
            output, errors = p.communicate()
            if errors:
                print (errors)

            #create spectrogram from given file
            filename.replace(".mp3","")
            #print "Creating spectrogram for file {}".format(filename)
            command = "sox '/tmp/{}.wav' -n spectrogram -Y 200 -X 50 -m -r -o '{}.png'".format(newFilename,predictSpect+newFilename)
            p = Popen(command, shell=True, stdin=PIPE, stdout=PIPE, stderr=STDOUT, close_fds=True, cwd=currentPath)
            output, errors = p.communicate()
            if errors:
                print (errors)

            #Remove tmp mono track
            #os.remove("/tmp/{}.wav".format(newFilename))
            counter += 1

    subdata = []
    data = []

    #print ("Spectrogams Created! ")
    #slice spectrograms

    #print ("Slicing Spectrogams")
    for newFilename in os.listdir(predictSpect):
        if newFilename.endswith(".png"):
            #slice
            img = Image.open(predictSpect+newFilename)

            width, height = img.size
            nbSamples = int(width/sliceSize)
            width - sliceSize

            #For each sample
            for i in range(nbSamples):
                #print "Creating slice: ", (i+1), "/", nbSamples, "for", newFilename
                #Extract and save 128x128 sample
                startPixel = i*sliceSize
                imgTmp = img.crop((startPixel, 1, startPixel + sliceSize, sliceSize + 1))
                imgTmp.save(predSlicePath+"{}_{}.png".format(newFilename[:-4],i))

                img_array = getImageData(predSlicePath+newFilename[:-4]+"_"+str(i)+".png", sliceSize)
                
                #append each loaded image to a sub-data array, and break to new subdata element when name changes
                subdata.append(img_array)
                #os.remove()
            
            #append sub-data array to super array
            data.append(subdata)
            subdata = []


    #print ("Slices Created! ")

    model.load('python/musicDNN.tflearn')
    #print ("Model loaded! ")

    #print data

    #parse through super array predicting each one 
    #and assign name to song object then append song object to songList in library
    #print ("Predicting")
    for vec in data:
        predictionSoftmax = model.predict(vec)[0]
        predictedIndex = max(enumerate(predictionSoftmax), key=lambda x:x[1])[0]
        s.vector = predictionSoftmax
        s.name = filename in os.listdir(predictionPath)
        s.genre = genres[predictedIndex]
        lib.songList.append(s.vector)
        lib.labels.append(s.genre)
    

    for x in lib.songList:
        print(x)
Code Example #14
File: classify.py Project: aymanzay/Macaron
def test_classify():

    #initial check to see if an arrays.txt file already exists and matches songs in the library
    if os.path.isfile("arrays.txt"):
        pass  # a cached arrays.txt already exists; the reuse logic is not shown in this excerpt

    subdata = []
    data = []

    #print ("Spectrogams Created! ")
    for newFilename in os.listdir(predictSpect):
        if newFilename.endswith(".png"):
            #slice
            img = Image.open(predictSpect+newFilename)

            width, height = img.size
            nbSamples = int(width/sliceSize)
            width - sliceSize

            #For each sample
            for i in range(nbSamples):
                img_array = getImageData(predSlicePath+newFilename[:-4]+"_"+str(i)+".png", sliceSize)
                subdata.append(img_array)
            data.append(subdata)
            subdata = []
    #print ("Slices Created! ")

    model.load('python/musicDNN.tflearn')
    #print ("Model loaded! ")

    #print data

    #parse through super array predicting each one 
    #and assign name to song object then append song object to songList in library
    #print ("Predicting")
    for vec in data:
        predictionSoftmax = model.predict(vec)[0]
        predictedIndex = max(enumerate(predictionSoftmax), key=lambda x:x[1])[0]
        s.vector = predictionSoftmax
        s.name = filename in os.listdir(predictionPath)
        s.genre = genres[predictedIndex]
        lib.songList.append(s.vector)
        lib.labels.append(s.genre)
    

    out = open('arrays.txt', 'w')
    for x in lib.songList:
        out.write("%s\n" % x)
    

def knnRequest(index):
    #model.load('python/musicDNN.tflearn')
    file_content = open('middle.txt')
    counter = 0
    arrays = []
    for line in file_content:
        arrays = line.split(",")

    data = []
    temp = []
    for vec in arrays:
        temp.append(vec)
        counter = counter + 1
        if counter == 8:
            data.append(temp)
            temp = []
            counter = 0
    
    for vec in data:
        maxV = np.argmax(vec)
        sGenre = genres[maxV]
        lib.labels.append(sGenre)

    #print lib.labels

    dataSet = np.asarray(data, dtype=np.float32)
    inputS = dataSet[0]
    k = 20
    output = kNNclassify(inputS, dataSet, lib.labels, k)
    
    out = open('translated.txt', 'w')
    for item in output:
        print(item)
        out.write("%s\n" % item)


if "classify" in sys.argv[1]:
    test_classify()

if "generate" in sys.argv[1]:
    index = sys.argv[2]
    knnRequest(index)
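
knnRequest delegates to kNNclassify, which is also missing from the listings. A minimal hypothetical sketch, assuming a plain Euclidean-distance k-nearest-neighbours vote, is:

from collections import Counter
import numpy as np

def kNNclassify(inputS, dataSet, labels, k):
    # Hypothetical sketch: rank labels by how often they occur among the k
    # nearest rows of dataSet (Euclidean distance to inputS).
    dists = np.linalg.norm(dataSet - inputS, axis=1)
    nearest = np.argsort(dists)[:k]
    votes = Counter(labels[i] for i in nearest)
    return [label for label, _ in votes.most_common()]
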
Code Example #15
for i in range(nbSamples):
    #Extract and save 128x128 sample
    startPixel = i * desiredSize
    imgTmp = img.crop(
        (startPixel, 1, startPixel + desiredSize, desiredSize + 1))
    imgTmp.save(slicePath + "{}/{}_{}.png".format("", newFilename[:-4], i))
print("Slices Created... ")

# array
nbClasses = 13
filenames = os.listdir(slicePath)
print(filenames)
data = []
trainNb = 0
for filename in filenames:
    imgData = getImageData(slicePath + "/" + filename, sliceSize)
    data.append((imgData))
    trainNb = trainNb + 1

feedData = np.array(data[:trainNb]).reshape([-1, sliceSize, sliceSize, 1])

print(feedData)

model = createModel(nbClasses, sliceSize)
print("Model Created")
model.load('musicDNN.tflearn')

print("++++++++++++++++++++++++++++++++++++", data[14],
      "+++++++++++++++++++++++++++++++++++++")

results = model.predict(feedData)
Code Example #16
def Send_File_to_Server():
	try:

		if(os.path.isfile('received')):
			return

		desiredSize = 128
		port = 12345
		time.sleep(1)
		#Create path if not existing
		slicePath = preTemp+"predictSlice\\";
		if not os.path.exists(os.path.dirname(slicePath)):
			try:
				os.makedirs(os.path.dirname(slicePath))
			except OSError as exc: # Guard against race condition
				if exc.errno != errno.EEXIST:
					raise

		if isMono(file):
			command = 'cp "{}" "{}.mp3"'.format(file,preTemp+newFilename)
		else:
			command = 'sox "{}" "{}.mp3" remix 1,2'.format(file,preTemp+newFilename)
		p = Popen(command, shell=True, stdin=PIPE, stdout=PIPE, stderr=STDOUT, close_fds=False, cwd=currentPath)
		output, errors = p.communicate()
		if errors:
			print (errors)

		#Create spectrogram
		command = 'sox "{}.mp3" -n spectrogram -Y 200 -X {} -m -r -o "{}.png"'.format(preTemp+newFilename,pixelPerSecond,preTemp+newFilename)
		p = Popen(command, shell=True, stdin=PIPE, stdout=PIPE, stderr=STDOUT, close_fds=False, cwd=currentPath)
		output, errors = p.communicate()
		if errors:
			print (errors)

		#Remove tmp mono track
		if os.path.exists("{}.mp3".format(preTemp+newFilename)):
			os.remove("{}.mp3".format(preTemp+newFilename))

		# Load the full spectrogram
		img = Image.open(preTemp+newFilename+".png")

		#Compute approximate number of 128x128 samples
		width, height = img.size
		nbSamples = int(width/desiredSize)
		width - desiredSize

		#For each sample
		for i in range(nbSamples):
			#Extract and save 128x128 sample
			startPixel = i*desiredSize
			imgTmp = img.crop((startPixel, 1, startPixel + desiredSize, desiredSize + 1))
			imgTmp.save(slicePath+"{}/{}_{}.png".format("",newFilename[:-4],i))
		print ("Slices Created... ")

		# Create Array
		nbClasses = 13
		filenames = os.listdir(slicePath)
		print (filenames)
		data = []
		trainNb = 0
		for filename in filenames:
			imgData = getImageData(slicePath+"/"+filename, sliceSize)
			data.append((imgData))
			trainNb = trainNb + 1


		print ("FEED_DATA #############################################################")
		feedData = np.array(data[:trainNb]).reshape([-1, sliceSize, sliceSize, 1])
		print(feedData)
		datatosend = feedData.tolist()

		print ("data_arr created ..........")
		print ("sending data ....")

		s=socket.socket()
		# hostname = socket.gethostbyname('baap')
		s.connect(("127.0.0.1", port))
		data = s.send(pickle.dumps(datatosend))
		s.shutdown(socket.SHUT_WR)


		print("slices ready to rock!!!")


		print("data send")


		f = open('file2send','wb') # Open in binary

		l = s.recv(1024)
		timeout = time.time() + 10   # 10 seconds from now (not used below)
		while (l):
			print ("Receiving...")
			f.write(l)
			l = s.recv(1024)
		f.close()
		os.rename('file2send', 'received')
		s.close()
		print("Receivied")
		

	except Exception as e:
		print (e)
		raise
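
Send_File_to_Server only shows the client half of the protocol: it pickles the slice array, sends it, half-closes the socket, and then reads a result file back. The server side is not included; the sketch below is a hypothetical counterpart based solely on that client behaviour (port, file name and the predict call are assumptions).

import pickle
import socket

def serve_once(model, port=12345, result_path="result.txt"):
    # Hypothetical sketch: accept one client, unpickle the slice array,
    # predict, and stream a result file back before closing.
    srv = socket.socket()
    srv.bind(("0.0.0.0", port))
    srv.listen(1)
    conn, _ = srv.accept()
    chunks = []
    while True:  # the client signals end-of-data with shutdown(SHUT_WR)
        chunk = conn.recv(4096)
        if not chunk:
            break
        chunks.append(chunk)
    feedData = pickle.loads(b"".join(chunks))
    predictions = model.predict(feedData)
    with open(result_path, "w") as f:
        f.write(str(predictions))
    with open(result_path, "rb") as f:
        conn.sendall(f.read())  # the client saves this stream as 'file2send'
    conn.close()
    srv.close()
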
Code Example #17
File: datasetTools.py Project: nlml/MusicLearning
def createDatasetFromSlices(nbPerGenre,
                            genres,
                            sliceSize,
                            validationRatio,
                            testRatio,
                            seed=1):

    datasetName = getDatasetName(nbPerGenre, sliceSize)

    rng = np.random.RandomState(seed)

    data = {}
    filenames = {}

    # Get all the filenames for every genre and partition into train valid test
    for genre in genres:
        # Get slices in genre subfolder
        filenames[genre] = list(sorted(os.listdir(slicesPath + genre)))
        filenames[genre] = [
            filename for filename in filenames[genre]
            if filename.endswith('.png')
        ]
        filenames[genre] = filenames[genre][:nbPerGenre]
        # Randomize file selection for this genre
        rng.shuffle(filenames[genre])
        filenames[genre] = dict(
            zip(['train', 'validation', 'test'],
                partition_into_train_val_test(rng, filenames[genre], 0.4,
                                              0.1)))

    # Read images and create / save dataset from train/val/test filename partitions
    for tvt in ['train', 'validation', 'test']:
        print("-> Creating {} set...".format(tvt))
        Xs, ys = [], []
        if tvt not in data:
            data[tvt] = []
        for genre in genres:
            print("->-> Adding {}...".format(genre))
            # Add data (X,y)
            X = np.array([
                getImageData(os.path.join(slicesPath + genre, filename),
                             sliceSize) for filename in filenames[genre][tvt]
            ]).astype(np.float32)
            single_y = np.array([1. if genre == g else 0.
                                 for g in genres]).reshape(1, -1)
            y = np.tile(single_y.astype(np.float32),
                        [len(filenames[genre][tvt]), 1])
            Xs.append(X)
            ys.append(y)

        # Concatenate over genres
        X, y = (np.concatenate(Xs, 0).astype(np.float32),
                np.concatenate(ys, 0).astype(np.float32))

        # Random shuffle X and y
        perm = rng.permutation(len(X))
        X = X[perm]
        y = y[perm]

        if not os.path.exists(datasetPath):
            os.makedirs(datasetPath)

        # Save dataset to disk
        h5fdump(X, "{}{}_X_{}.p".format(datasetPath, tvt, datasetName))
        h5fdump(y, "{}{}_y_{}.p".format(datasetPath, tvt, datasetName))
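
partition_into_train_val_test is used above but not shown. From the call site it receives the RNG, the file list, and two fractions that this sketch reads as the validation and test shares (an assumption; the real helper may interpret them differently):

def partition_into_train_val_test(rng, items, validationRatio, testRatio):
    # Hypothetical sketch: shuffle, then split into train / validation / test lists.
    items = list(items)
    rng.shuffle(items)
    nbValidation = int(len(items) * validationRatio)
    nbTest = int(len(items) * testRatio)
    validation = items[:nbValidation]
    test = items[nbValidation:nbValidation + nbTest]
    train = items[nbValidation + nbTest:]
    return train, validation, test
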
Code Example #18
File: ak.py Project: ediminished/live-equalizer
nbSamples = int(width / desiredSize)
width - desiredSize

#For each sample
#for i in range(nbSamples):
#Extract and save 128x128 sample
#	startPixel = i*desiredSize
#	imgTmp = img.crop((startPixel, 1, startPixel + desiredSize, desiredSize + 1))
#	imgTmp.save(mypath+"{}/{}_{}.png".format("",newNameIs[:-4],i))
#	print ("Slices Created... ")		# Create Array
nbClasses = 13
fileNames = os.listdir(mypath)
data = []
trainNb = 0
for i in fileNames:
    imgData = getImageData(mypath + "/" + i, sliceSize)
    data.append((imgData))
    trainNb = trainNb + 1
print(
    "FEED_DATA #############################################################")
feedData = np.array(data[:trainNb]).reshape([-1, sliceSize, sliceSize, 1])
print(feedData)
datatosend = feedData.tolist()

print("data_arr created ..........")

#foo = {'text': 'Hello HTTP #1 **cool**, and #1!'}
json_data = json.dumps(datatosend)
print('askl')
conn.request('POST', '/post', json_data, headers)
print("sent")