images = glob.glob('../bee_project/*.jpg')
	for f_name in images:
		if count >= batch_size:
			X_train = numpy.array(data,dtype='float32')
			y_train = numpy.array(labels,dtype='int32')
			data = []
			labels = []
			count = 0
			yield (X_train,y_train)

		im = Image.open(f_name,mode='r')
		dat = numpy.asarray(im).astype('float32') / 255
		im.close()
		dat = numpy.rollaxis(dat,2)
		data.append(dat)
		
		#hacky way of getting classification from filename
		jpg_idx = f_name.find('.jpg')
		labels.append(int(f_name[jpg_idx-1:jpg_idx]))

		count+=1
		
	yield (X_train,y_train)

# Train incrementally: each batch produced by the generator updates the
# model in place via partial_fit.
for batch_X, batch_y in generate_data():
	convNet.partial_fit(batch_X, batch_y)

# Persist the trained model to disk; protocol=-1 selects the highest
# pickle protocol available.
with open('convNet.pickle', 'wb') as model_file:
	pickle.dump(convNet, model_file, protocol=-1)