def __init__(self, model_path, X_train_mean_path):
    """Load a trained steering model and set up per-submodel neuron-coverage trackers.

    Args:
        model_path: path to a saved Keras model loadable by `load_model`.
        X_train_mean_path: path to a .npy file with the training-set mean image,
            loaded into `self.X_mean` (presumably subtracted from inputs at
            prediction time — confirm against the predict method).
    """
    self.model = load_model(model_path)
    # Recompile so the loaded model can be used for prediction/evaluation.
    self.model.compile(optimizer="adam", loss="mse")
    self.X_mean = np.load(X_train_mean_path)
    # Hardcoded constant; presumably the dataset's mean steering angle — TODO confirm.
    self.mean_angle = np.array([-0.004179079])
    print(self.mean_angle)
    self.img0 = None
    # Rolling window of the two most recent states.
    self.state = deque(maxlen=2)
    # Activation threshold for neuron coverage.
    self.threshold = 0.2
    #self.nc = NCoverage(self.model,self.threshold)
    # One NCoverage tracker per inner sequential submodel of the loaded model.
    s1 = self.model.get_layer('sequential_1')
    self.nc1 = NCoverage(s1, self.threshold)
    #print(s1.summary())
    s2 = self.model.get_layer('sequential_2')
    #print(s2.summary())
    self.nc2 = NCoverage(s2, self.threshold)
    s3 = self.model.get_layer('sequential_3')
    #print(s3.summary())
    self.nc3 = NCoverage(s3, self.threshold)
    # Sub-model exposing the output of layer 'input_1'.
    # NOTE(review): `input=`/`output=` kwargs are legacy Keras 1 API.
    i1 = self.model.get_layer('input_1')
    self.i1_model = Kmodel(input=self.model.inputs, output=i1.output)
def __init__(self, cnn_json_path, cnn_weights_path, lstm_json_path, lstm_weights_path, only_layer=""):
    """Load the CNN feature extractor, its encoder view, and the LSTM head,
    and attach neuron-coverage trackers to the encoder and the LSTM.

    Args:
        cnn_json_path: path to the CNN architecture JSON.
        cnn_weights_path: path to the CNN weights file.
        lstm_json_path: path to the LSTM architecture JSON.
        lstm_weights_path: path to the LSTM weights file.
        only_layer: optional layer-name filter forwarded to the encoder's
            NCoverage (empty string means no restriction — TODO confirm
            against NCoverage's handling of "").
    """
    self.cnn = self.load_from_json(cnn_json_path, cnn_weights_path)
    self.encoder = self.load_encoder(cnn_json_path, cnn_weights_path)
    self.lstm = self.load_from_json(lstm_json_path, lstm_weights_path)
    # hardcoded from final submission model
    self.scale = 16.
    self.timesteps = 100
    # Separate coverage thresholds for the CNN encoder and the LSTM.
    self.threshold_cnn = 0.1
    self.threshold_lstm = 0.4
    # Buffer of encoder features for one LSTM sequence: (batch=1, timesteps, 8960).
    self.timestepped_x = np.empty((1, self.timesteps, 8960))
    self.nc_lstm = NCoverage(self.lstm, self.threshold_lstm)
    # Exclude pooling/fully-connected/flatten layers from encoder coverage.
    self.nc_encoder = NCoverage(self.encoder, self.threshold_cnn, exclude_layer=['pool', 'fc', 'flatten'], only_layer=only_layer)
    self.steps = deque()
def epoch_testgen_coverage(index, dataset_path):
    """Measure neuron coverage of the Epoch CNN over transformed seed images.

    `index` selects one (transformation, parameter, image-half) work unit so
    that many invocations can be sharded across jobs:

    - index // 2 picks the transformation group and its parameter:
        0 -> untransformed seed inputs, 1-10 translation, 11-20 scale,
        21-30 shear, 31-40 rotation, 41-50 contrast, 51-60 brightness,
        61-70 blur.
    - index % 2 picks which half of the 1000 sampled images to process
      (even -> images 1..500, odd -> images 501..1000).

    One CSV row per image is appended to
    'result/epoch_coverage_70000_images.csv' with the coverage statistics,
    the model prediction and the ground-truth label.

    Args:
        index: work-unit selector (anything `int()` accepts).
        dataset_path: root directory containing the hmb3/ and Ch2_001/ data.

    Raises:
        ValueError: if the hardcoded model name has no registered builder.
    """
    model_name = "cnn"
    image_size = (128, 128)
    threshold = 0.2
    weights_path = './weights_HMB_2.hdf5'  # Change to your model weights
    seed_inputs1 = os.path.join(dataset_path, "hmb3/")
    seed_labels1 = os.path.join(dataset_path, "hmb3/hmb3_steering.csv")
    seed_inputs2 = os.path.join(dataset_path, "Ch2_001/center/")
    seed_labels2 = os.path.join(dataset_path, "Ch2_001/CH2_final_evaluation.csv")

    # Model build
    # ---------------------------------------------------------------------
    model_builders = {
        'V3': (build_InceptionV3, preprocess_input_InceptionV3, exact_output),
        'cnn': (build_cnn, normalize_input, exact_output)
    }
    if model_name not in model_builders:
        raise ValueError("unsupported model %s" % model_name)
    model_builder, input_processor, output_processor = model_builders[model_name]
    model = model_builder(image_size, weights_path)
    print('model %s built...' % model_name)

    filelist1 = [f for f in sorted(os.listdir(seed_inputs1)) if f.endswith(".jpg")]
    filelist2 = [f for f in sorted(os.listdir(seed_inputs2)) if f.endswith(".jpg")]

    # Binary mode for csv matches this file's Python 2 conventions.
    with open(seed_labels1, 'rb') as csvfile1:
        label1 = list(csv.reader(csvfile1, delimiter=',', quotechar='|'))[1:]
    with open(seed_labels2, 'rb') as csvfile2:
        label2 = list(csv.reader(csvfile2, delimiter=',', quotechar='|'))[1:]

    nc = NCoverage(model, threshold)
    index = int(index)

    # Human-readable kernel descriptions for each image_blur parameter value.
    blur_names = {
        1: "averaging:3:3", 2: "averaging:4:4", 3: "averaging:5:5",
        4: "GaussianBlur:3:3", 5: "GaussianBlur:5:5", 6: "GaussianBlur:7:7",
        7: "medianBlur:3", 8: "medianBlur:5", 9: "averaging:6:6",
        10: "bilateralFilter:9:75:75",
    }

    # Append-only, unbuffered, so concurrent shards don't lose rows on crash.
    with open('result/epoch_coverage_70000_images.csv', 'ab', 0) as csvfile:
        writer = csv.writer(csvfile, delimiter=',', quotechar='|',
                            quoting=csv.QUOTE_MINIMAL)
        if index == 0:
            writer.writerow([
                'index', 'image', 'tranformation', 'param_name',
                'param_value', 'threshold', 'covered_neurons',
                'total_neurons', 'covered_detail', 'y_hat', 'label'
            ])

        # Even index -> first half of the sampled images, odd -> second half.
        if index % 2 == 0:
            input_images = xrange(1, 501)
        else:
            input_images = xrange(501, 1001)

        def process_images(transform, t_params, trans_name, param_name, param_value):
            # Shared per-image pipeline: read, (optionally) transform, predict,
            # update coverage, and append one CSV row.  Every transformation
            # group previously duplicated this loop verbatim.
            for i in input_images:
                j = i * 5  # sample every 5th frame
                seed_image = cv2.imread(os.path.join(seed_inputs1, filelist1[j]))
                if transform is not None:
                    seed_image = transform(seed_image, t_params)
                seed_image = read_transformed_image(seed_image, image_size)
                test_x = input_processor(seed_image.astype(np.float32))
                yhat = model.predict(test_x)
                ndict = nc.update_coverage(test_x)
                new_covered1, new_total1, _pct = nc.curr_neuron_cov()
                # Record which neurons fired, sorted by (layer, neuron);
                # ':' replaces ',' so tuples survive inside one CSV cell.
                tempk = sorted((k for k in ndict.keys() if ndict[k]),
                               key=lambda element: (element[0], element[1]))
                covered_detail = ';'.join(str(x) for x in tempk).replace(',', ':')
                # Coverage is measured per image, not cumulatively.
                nc.reset_cov_dict()
                filename, ext = os.path.splitext(str(filelist1[j]))
                if label1[j][0] != filename:
                    print(filename + " not found in the label file")
                    continue
                if j < 2:
                    continue
                csvrecord = [j - 2, str(filelist1[j]), trans_name, param_name,
                             param_value, threshold, new_covered1, new_total1,
                             covered_detail, yhat[0][0], label1[j][1]]
                print(csvrecord)
                writer.writerow(csvrecord)

        # Floor division: identical to Python 2 `/` on ints, and also
        # correct under Python 3.
        group = index // 2
        if group == 0:
            # Untransformed seed inputs.
            process_images(None, None, '-', '-', '-')
            print("seed input done")
        elif 1 <= group <= 10:
            p = group
            params = [p * 10, p * 10]
            process_images(image_translation, params, 'translation', 'x:y',
                           ':'.join(str(x) for x in params))
            print("translation done")
        elif 11 <= group <= 20:
            p = group - 10
            params = [p * 0.5 + 1, p * 0.5 + 1]
            process_images(image_scale, params, 'scale', 'x:y',
                           ':'.join(str(x) for x in params))
            print("scale done")
        elif 21 <= group <= 30:
            params = 0.1 * (group - 20)
            process_images(image_shear, params, 'shear', 'factor', params)
            print("shear done")  # fixed typo: was "sheer done"
        elif 31 <= group <= 40:
            params = (group - 30) * 3
            process_images(image_rotation, params, 'rotation', 'angle', params)
            print("rotation done")
        elif 41 <= group <= 50:
            params = 1 + (group - 40) * 0.2
            process_images(image_contrast, params, 'contrast', 'gain', params)
            print("contrast done")
        elif 51 <= group <= 60:
            params = (group - 50) * 10
            process_images(image_brightness2, params, 'brightness', 'bias', params)
            print("brightness done")
        elif 61 <= group <= 70:
            params = group - 60
            param_name = blur_names.get(params, "")
            # The original format writes the blur description in both the
            # param_name and param_value columns; preserved for compatibility.
            process_images(image_blur, params, 'blur', param_name, param_name)
            print("all done")
def epoch_guided(dataset_path):
    """Coverage-guided test generation for the Epoch CNN.

    Seeds a queue with every 50th hmb3 image, then for each seed runs a
    depth-first search (a stack of images) applying random pairs of
    transformations; a generated image that increases neuron coverage is
    written to ./new/, logged to a CSV, and pushed on the stack for further
    mutation.  Progress (stack, queue, coverage dict, generated count) is
    pickled after each step so an interrupted run can resume.

    Args:
        dataset_path: root directory containing the hmb3/ data.

    Raises:
        ValueError: if the hardcoded model name has no registered builder.
    """
    model_name = "cnn"
    image_size = (128, 128)
    threshold = 0.2
    weights_path = './weights_HMB_2.hdf5'  # Change to your model weights
    seed_inputs1 = os.path.join(dataset_path, "hmb3/")
    seed_labels1 = os.path.join(dataset_path, "hmb3/hmb3_steering.csv")
    seed_inputs2 = os.path.join(dataset_path, "Ch2_001/center/")
    seed_labels2 = os.path.join(dataset_path, "Ch2_001/CH2_final_evaluation.csv")
    # Output directory for generated (coverage-increasing) images.
    new_inputs = "./new/"
    # Model build
    # ---------------------------------------------------------------------------------
    model_builders = {
        'V3': (build_InceptionV3, preprocess_input_InceptionV3, exact_output),
        'cnn': (build_cnn, normalize_input, exact_output)
    }
    if model_name not in model_builders:
        raise ValueError("unsupported model %s" % model_name)
    model_builder, input_processor, output_processor = model_builders[
        model_name]
    model = model_builder(image_size, weights_path)
    print('model %s built...' % model_name)
    filelist1 = []
    for file in sorted(os.listdir(seed_inputs1)):
        if file.endswith(".jpg"):
            filelist1.append(file)
    # Map image filename -> ground-truth steering angle from the label CSV.
    truth = {}
    with open(seed_labels1, 'rb') as csvfile1:
        label1 = list(csv.reader(csvfile1, delimiter=',', quotechar='|'))
    label1 = label1[1:]  # drop header row
    for i in label1:
        truth[i[0] + ".jpg"] = i[1]
    newlist = []
    for file in sorted(os.listdir(new_inputs)):
        if file.endswith(".jpg"):
            newlist.append(file)
    flag = 0
    #flag:0 start from beginning
    #flag:1 initialize from pickle files
    '''
    Pickle files are used for continuing the search after rerunning the script.
    Delete all pkl files and generated images for starting from the beginnning.
    '''
    # Resume only if ALL four checkpoint files are present.
    if os.path.isfile("epoch_covdict2.pkl") and \
            os.path.isfile("epoch_stack.pkl") and \
            os.path.isfile("epoch_queue.pkl") and \
            os.path.isfile("generated.pkl"):
        with open('epoch_covdict2.pkl', 'rb') as input:
            covdict = pickle.load(input)
        with open('epoch_stack.pkl', 'rb') as input:
            epoch_stack = pickle.load(input)
        with open('epoch_queue.pkl', 'rb') as input:
            epoch_queue = pickle.load(input)
        with open('generated.pkl', 'rb') as input:
            generated = pickle.load(input)
        flag = 1
    nc = NCoverage(model, threshold)
    if flag == 0:
        # Fresh run: truncate the CSV and start with empty search state.
        filewrite = "wb"
        epoch_queue = deque()
        epoch_stack = []
        generated = 0
    else:
        # Resumed run: restore coverage and append to the existing CSV.
        nc.set_covdict(covdict)
        filewrite = "ab"
        print("initialize from files and continue from previous progress")
    C = 0  # covered neurons
    P = 0  # covered percentage
    T = 0  # total neurons
    transformations = [
        image_translation, image_scale, image_shear, image_rotation,
        image_contrast, image_brightness2, image_blur
    ]
    # Parameter pools, index-aligned with `transformations` above.
    params = []
    params.append(list(xrange(-50, 50)))
    params.append(list(map(lambda x: x * 0.1, list(xrange(5, 20)))))
    params.append(list(map(lambda x: x * 0.1, list(xrange(-5, 5)))))
    params.append(list(xrange(-30, 30)))
    params.append(list(map(lambda x: x * 0.1, list(xrange(1, 20)))))
    params.append(list(xrange(-21, 21)))
    params.append(list(xrange(1, 11)))
    maxtrynumber = 10  # mutation attempts per stack image before backtracking
    maximages = 200
    # Cache of transformation ids that recently increased coverage; they are
    # retried first on subsequent attempts.
    cache = deque()
    image_count = 0
    #load nc, generation, population
    with open('result/epoch_rq3_100_2.csv', filewrite, 0) as csvfile:
        writer = csv.writer(csvfile,
                            delimiter=',',
                            quotechar='|',
                            quoting=csv.QUOTE_MINIMAL)
        if flag == 0:
            writer.writerow([
                'id', 'seed image(root)', 'parent image',
                'new generated image', 'number of generated images',
                'total_covered', 'total_neurons', 'coverage_percentage',
                'transformations', 'yhat', 'baseline', 'label'
            ])
        #initialize population and coverage
        print("compute coverage of original population")
        # Seed population: every 50th image from the first 100*50 frames.
        input_images = xrange(1, 101)
        for i in input_images:
            j = i * 50
            epoch_queue.append(os.path.join(seed_inputs1, filelist1[j]))
        while len(epoch_queue) > 0:
            current_seed_image = epoch_queue[0]
            print(str(len(epoch_queue)) + " images are left.")
            if len(epoch_stack) == 0:
                epoch_stack.append(current_seed_image)
            image = cv2.imread(current_seed_image)
            test_x = read_transformed_image(image, image_size)
            test_x = input_processor(test_x.astype(np.float32))
            nc.update_coverage(test_x)
            # Prediction on the untransformed seed, logged alongside each child.
            baseline_yhat = model.predict(test_x)
            #image_count = 0
            # Depth-first search from the current seed.
            while len(epoch_stack) > 0:
                try:
                    image_file = epoch_stack[-1]
                    print("current image in stack " + image_file)
                    image = cv2.imread(image_file)
                    new_generated = False
                    for i in xrange(maxtrynumber):
                        # Pick two distinct transformations at random; prefer a
                        # cached (previously successful) one for the first slot.
                        tid = random.sample([0, 1, 2, 3, 4, 5, 6], 2)
                        if len(cache) > 0:
                            tid[0] = cache.popleft()
                        transinfo = ""
                        new_image = image
                        for j in xrange(2):
                            transformation = transformations[tid[j]]
                            #random choose parameter
                            param = random.sample(params[tid[j]], 1)
                            param = param[0]
                            transinfo = transinfo + transformation.__name__ + ':' + str(
                                param) + ';'
                            print("transformation " + transformation.__name__ +
                                  " parameter " + str(param))
                            new_image = transformation(new_image, param)
                        new_x = read_transformed_image(new_image, image_size)
                        test_x = input_processor(new_x.astype(np.float32))
                        if nc.is_testcase_increase_coverage(test_x):
                            print(
                                "Generated image increases coverage and will be added to population."
                            )
                            # Remember both transformation ids as promising.
                            cache.append(tid[0])
                            cache.append(tid[1])
                            generated = generated + 1
                            #image_count = image_count + 1
                            name = os.path.basename(
                                current_seed_image) + '_' + str(
                                    generated) + '.jpg'
                            name = os.path.join(new_inputs, name)
                            cv2.imwrite(name, new_image)
                            # Push the child so it is mutated next (DFS).
                            epoch_stack.append(name)
                            nc.update_coverage(test_x)
                            yhat = model.predict(test_x)
                            covered, total, p = nc.curr_neuron_cov()
                            C = covered
                            T = total
                            P = p
                            csvrecord = []
                            csvrecord.append(100 - len(epoch_queue))
                            csvrecord.append(
                                os.path.basename(current_seed_image))
                            if len(epoch_stack) >= 2:
                                parent = os.path.basename(epoch_stack[-2])
                            else:
                                parent = os.path.basename(current_seed_image)
                            child = os.path.basename(
                                current_seed_image) + '_' + str(
                                    generated) + '.jpg'
                            csvrecord.append(parent)
                            csvrecord.append(child)
                            csvrecord.append(generated)
                            csvrecord.append(C)
                            csvrecord.append(T)
                            csvrecord.append(P)
                            csvrecord.append(transinfo)
                            csvrecord.append(yhat[0][0])
                            csvrecord.append(baseline_yhat[0][0])
                            csvrecord.append(
                                truth[os.path.basename(current_seed_image)])
                            print(csvrecord)
                            writer.writerow(csvrecord)
                            new_generated = True
                            break
                        else:
                            print(
                                "Generated image does not increase coverage.")
                    # No attempt improved coverage: backtrack.
                    if not new_generated:
                        epoch_stack.pop()
                    # Checkpoint the full search state after every stack step.
                    save_object(epoch_stack, 'epoch_stack.pkl')
                    save_object(epoch_queue, 'epoch_queue.pkl')
                    save_object(nc.cov_dict, 'epoch_covdict2.pkl')
                    save_object(generated, 'generated.pkl')
                except ValueError:
                    # A transformation/predict failure on this image: drop it
                    # and checkpoint so a rerun skips it.
                    print("value error")
                    epoch_stack.pop()
                    save_object(epoch_stack, 'epoch_stack.pkl')
                    save_object(epoch_queue, 'epoch_queue.pkl')
            epoch_queue.popleft()