Example #1
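# Script-level excerpt: `dataset` and `predictions_path` are assumed to be
# defined earlier in the surrounding script (e.g. parsed from the command line).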
#labels=np.load(path+'labels.npy').argmax(axis=4)
#predictions=np.load(predictions_path).argmax(axis=4)

print("Loading labels and predictions...")

prediction_type = 'model'
results_path = "../"
#path=results_path+dataset+'/'
#prediction_path=path+predictions_path
path_test = '../../../../../dataset/dataset/' + dataset + '_data/patches_bckndfixed/test/'
print('path_test', path_test)

#prediction_type = 'model'
if prediction_type == 'npy':
    # Note: this branch also needs `path` (the results folder), which is only
    # defined in the commented-out line above.
    predictionsLoader = PredictionsLoaderNPY()
    predictions, labels = predictionsLoader.loadPredictions(
        predictions_path, path + 'labels.npy')
elif prediction_type == 'model':
    #model_path=results_path + 'model/'+dataset+'/'+prediction_filename
    print('model_path', predictions_path)

    predictionsLoader = PredictionsLoaderModel(path_test)
    model = predictionsLoader.loadModel(predictions_path)

#================= load labels and predictions

#class_n=np.max(predictions)+1
#print("class_n",class_n)
#labels[labels==class_n]=255 # background
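
Both branches above rely on the PredictionsLoader helper classes, whose implementation is not shown on this page. A minimal sketch consistent with the calls used here might look like the following; the class bodies are assumptions (only the method names and arguments are taken from the usage above), and the Keras load_model call assumes the saved model is a Keras model file.

import numpy as np
from tensorflow.keras.models import load_model  # assumption: models are saved with Keras


class PredictionsLoaderNPY:
    """Sketch: load pre-computed predictions and labels from .npy files."""

    def loadPredictions(self, predictions_path, labels_path):
        predictions = np.load(predictions_path, allow_pickle=True)
        labels = np.load(labels_path, allow_pickle=True)
        return predictions, labels


class PredictionsLoaderModel:
    """Sketch: wrap a saved model so predictions can be generated from the test patches."""

    def __init__(self, path_test):
        self.path_test = path_test  # folder with the test patches

    def loadModel(self, model_path):
        # Only loads the model; loadPredictions (used in the examples below)
        # would additionally run it over the patches in self.path_test.
        return load_model(model_path, compile=False)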
Example #2
def experiment_analyze(
        small_classes_ignore,
        dataset='cv',
        prediction_filename='prediction_DenseNetTimeDistributed_blockgoer.npy',
        prediction_type='npy',
        mode='each_date',
        debug=1,
        model_n=0):
    #path='/home/lvc/Jorg/igarss/convrnn_remote_sensing/results/seq2seq_ignorelabel/'+dataset+'/'
    base_path = "../../results/convlstm_results/"
    path = base_path + dataset + '/'
    prediction_path = path + prediction_filename
    path_test = '../../../../dataset/dataset/' + dataset + '_data/patches_bckndfixed/test/'
    print('path_test', path_test)

    #prediction_type = 'model'
    if prediction_type == 'npy':
        predictionsLoader = PredictionsLoaderNPY()
        predictions, label_test = predictionsLoader.loadPredictions(
            prediction_path, path + 'labels.npy')
    elif prediction_type == 'model':
        model_path = base_path + 'model/' + dataset + '/' + prediction_filename
        print('model_path', model_path)

        #predictionsLoader = PredictionsLoaderModel(path_test)
        #predictionsLoader = PredictionsLoaderModelNto1(path_test)
        predictionsLoader = PredictionsLoaderModelNto1FixedSeqFixedLabel(
            path_test, dataset=dataset)

        predictions, label_test = predictionsLoader.loadPredictions(model_path)
        deb.prints(np.unique(np.concatenate((predictions, label_test),
                                            axis=0)))

    #predictions=np.load(prediction_path, allow_pickle=True)
    #label_test=np.load(path+'labels.npy', allow_pickle=True)

    print("Loaded predictions unique: ",
          np.unique(predictions, return_counts=True))
    print("Loaded label test unique: ",
          np.unique(label_test, return_counts=True))

    print("Loaded predictions shape: ", predictions.shape)
    print("Loaded label test shape: ", label_test.shape)

    prediction_unique, prediction_count = np.unique(predictions,
                                                    return_counts=True)
    label_test_unique, label_test_count = np.unique(label_test,
                                                    return_counts=True)
    print(np.sum(prediction_count[:]))
    print(np.sum(label_test_count[:-1]))  # presumably drops the last unique value (background)

    #pdb.set_trace()
    class_n = predictions.shape[-1]
    mode = 'each_date'  # overrides the `mode` argument, so the 'global' branch below is never reached
    skip_crf = True
    if mode == 'each_date':
        metrics_t = {'f1_score': [], 'overall_acc': [], 'average_acc': []}

        # if dataset=='cv':
        # 	important_classes=[]
        # 	for date in range(14):
        # 		if date<=7:
        # 			date_important_classes=[0,6,8]

        for t in range(label_test.shape[1]):
            # Full arrays are copied; labels_predictions_filter_transform
            # selects the current date via its t= argument.
            predictions_t = predictions.copy()
            label_test_t = label_test.copy()
            #skip_crf = model_n<2 #prediction_filename.startswith('model_best_BUnet4ConvLSTM_128fl_')
            print("###skip_crf###")
            print(skip_crf)
            print(prediction_filename)

            label_test_t, predictions_t = labels_predictions_filter_transform(
                label_test_t,
                predictions_t,
                class_n=class_n,
                debug=debug,
                small_classes_ignore=small_classes_ignore,
                important_classes=None,
                dataset=dataset,
                skip_crf=skip_crf,
                t=t)
            metrics = metrics_get(label_test_t,
                                  predictions_t,
                                  only_basics=True,
                                  debug=debug,
                                  detailed_t=t)
            print(metrics)
            #		pdb.set_trace()
            metrics_t['f1_score'].append(metrics['f1_score'])
            metrics_t['overall_acc'].append(metrics['overall_acc'])
            metrics_t['average_acc'].append(metrics['average_acc'])

        print(metrics_t)
        #pdb.set_trace()
        return metrics_t
    elif mode == 'global':

        label_test, predictions = labels_predictions_filter_transform(
            label_test, predictions, class_n=class_n)

        print(np.unique(predictions, return_counts=True))
        print(np.unique(label_test, return_counts=True))

        metrics = metrics_get(label_test, predictions)

        return metrics
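
A hypothetical call of the function above, averaging the per-date scores; the keyword values and the averaging step are illustrative assumptions, not part of the original code.

import numpy as np

# Illustrative call (assumed arguments); 'each_date' returns one entry per date.
metrics_t = experiment_analyze(
    small_classes_ignore=True,
    dataset='cv',
    prediction_filename='prediction_DenseNetTimeDistributed_blockgoer.npy',
    prediction_type='npy',
    mode='each_date',
    debug=1,
    model_n=0)

print("Mean per-date overall accuracy:", np.mean(metrics_t['overall_acc']))
print("Mean per-date average accuracy:", np.mean(metrics_t['average_acc']))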
def experiment_analyze(dataset='cv',
		prediction_filename='prediction_DenseNetTimeDistributed_blockgoer.npy',
		prediction_type='npy', mode='each_date',debug=1):
	#path='/home/lvc/Jorg/igarss/convrnn_remote_sensing/results/seq2seq_ignorelabel/'+dataset+'/'
	base_path="../../results/convlstm_results/"
	path=base_path+dataset+'/'
	prediction_path=path+prediction_filename
	path_test='../../../../dataset/dataset/'+dataset+'_data/patches_bckndfixed/test/'
	print('path_test',path_test)
	
	#prediction_type = 'model'
	if prediction_type=='npy':
		predictionsLoader = PredictionsLoaderNPY()
		predictions, label_test = predictionsLoader.loadPredictions(prediction_path,path+'labels.npy')
	elif prediction_type=='model':	
		model_path=base_path + 'model/'+dataset+'/'+prediction_filename
		print('model_path',model_path)

		predictionsLoader = PredictionsLoaderModel(path_test)
		predictions, label_test = predictionsLoader.loadPredictions(model_path)


#		mode='each_date',debug=1):
#	path='/home/lvc/Jorg/igarss/convrnn_remote_sensing/results/seq2seq_ignorelabel/'+dataset+'/'

#	prediction_path=path+prediction_filename
#	predictions=np.load(prediction_path)
#	label_test=np.load(path+'labels.npy')
#	if debug>0:
#		print(predictions.shape)
#		print(label_test.shape)
	class_n=predictions.shape[-1]

	if mode=='each_date':
		metrics_t={'f1_score':[],'overall_acc':[],
			'average_acc':[]}
		label_test_v=label_test.argmax(axis=4).flatten()
		label_test_v=label_test_v[label_test_v<class_n]

		label_unique=np.unique(label_test_v)
		print("label_unique",label_unique)
		labels_unique_t=[]
		for t in range(label_test.shape[1]):
			predictions_t = predictions[:,t,:,:,:]
			label_test_t = label_test[:,t,:,:,:]

			label_test_t,predictions_t = labels_predictions_filter_transform(
				label_test_t, predictions_t, class_n=class_n,
				debug=debug)
			print("predictions_t",np.unique(
				predictions_t).shape)
			print("label_test_t",np.unique(
				label_test_t).shape)

			label_unique_t=np.unique(label_test_t)
			predictions_unique_t=np.unique(predictions_t)
			classes_t = np.unique(np.concatenate((label_unique_t,predictions_unique_t),0))
			##print("classes_t.shape",classes_t.shape)
			metrics = metrics_get(label_test_t, predictions_t,
				only_basics=True, debug=debug)	
			##print("metrics['f1_score'].shape",metrics['f1_score'].shape)
			#metrics_t['f1_score'].append(metrics['f1_score'])
			#metrics_t['overall_acc'].append(metrics['overall_acc'])
			metrics_ordered={'f1_score':np.zeros(label_unique.shape)}
			valid_classes_counter=0
			##print(metrics_ordered['f1_score'])
			for clss in range(label_unique.shape[0]):
				#print(clss)
				if np.any(classes_t==clss): # If this timestep t has class clss
					##print("1",valid_classes_counter)
					##print("2",classes_t[valid_classes_counter])
					##print("3",metrics['f1_score'][valid_classes_counter])
					
					metrics_ordered['f1_score'][clss]=metrics['f1_score'][valid_classes_counter]
					valid_classes_counter+=1
				if not np.any(label_unique_t==clss): # class absent at this date
					metrics_ordered['f1_score'][clss]=np.nan

			metrics_t['f1_score'].append(metrics_ordered['f1_score'])
			labels_unique_t.append(label_unique_t)
			print("class_n",t,metrics['f1_score'].shape)

		print(metrics_t)
		return metrics_t
	elif mode=='global':
		
		label_test,predictions=labels_predictions_filter_transform(
			label_test,predictions, class_n=class_n)

		print(np.unique(predictions,return_counts=True))
		print(np.unique(label_test,return_counts=True))

		metrics=metrics_get(label_test,predictions)

		return metrics
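
Because classes that are absent at a given date are stored as np.nan in metrics_t['f1_score'], any aggregation across dates should use NaN-aware reductions. A short sketch (variable names are illustrative):

import numpy as np

# metrics_t['f1_score'] is a list with one per-class F1 array per date,
# containing np.nan for classes that do not occur at that date.
f1_per_date = np.stack(metrics_t['f1_score'])          # shape: (n_dates, n_classes)
mean_f1_per_class = np.nanmean(f1_per_date, axis=0)    # ignore dates where a class is missing
print("Per-class F1 averaged over dates:", mean_f1_per_class)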
def experiment_analyze(
        small_classes_ignore,
        dataset='cv',
        prediction_filename='prediction_DenseNetTimeDistributed_blockgoer.npy',
        mode='each_date',
        debug=1):
    #path='/home/lvc/Jorg/igarss/convrnn_remote_sensing/results/seq2seq_ignorelabel/'+dataset+'/'
    path = "../../results/convlstm_results/" + dataset + '/'
    prediction_path = path + prediction_filename
    path_test = '../../../../dataset/dataset/' + dataset + '_data/patches_bckndfixed/test/'

    predictionsLoader = PredictionsLoaderNPY()
    predictions, label_test = predictionsLoader.loadPredictions(
        prediction_path, path + 'labels.npy')

    #predictionsLoader = PredictionsLoaderModel(path_test)
    #predictions, label_test = predictionsLoader.loadPredictions(path[:-3] + 'model/'+dataset+'/'+prediction_filename)

    #predictions=np.load(prediction_path, allow_pickle=True)
    #label_test=np.load(path+'labels.npy', allow_pickle=True)

    print("Loaded predictions unique: ",
          np.unique(predictions.argmax(axis=-1), return_counts=True))
    print("Loaded label test unique: ",
          np.unique(label_test.argmax(axis=-1), return_counts=True))

    print("Loaded predictions shape: ", predictions.shape)
    print("Loaded label test shape: ", label_test.shape)

    prediction_unique, prediction_count = np.unique(
        predictions.argmax(axis=-1), return_counts=True)
    label_test_unique, label_test_count = np.unique(label_test.argmax(axis=-1),
                                                    return_counts=True)
    print(np.sum(prediction_count[:]))
    print(np.sum(label_test_count[:-1]))

    #pdb.set_trace()
    class_n = predictions.shape[-1]

    if mode == 'each_date':
        metrics_t = {'f1_score': [], 'overall_acc': [], 'average_acc': []}

        # if dataset=='cv':
        # 	important_classes=[]
        # 	for date in range(14):
        # 		if date<=7:
        # 			date_important_classes=[0,6,8]

        for t in range(label_test.shape[1]):
            predictions_t = predictions[:, t, :, :, :]
            label_test_t = label_test[:, t, :, :, :]

            label_test_t, predictions_t = labels_predictions_filter_transform(
                label_test_t,
                predictions_t,
                class_n=class_n,
                debug=debug,
                small_classes_ignore=small_classes_ignore,
                important_classes=None)
            metrics = metrics_get(label_test_t,
                                  predictions_t,
                                  only_basics=True,
                                  debug=debug,
                                  detailed_t=t)
            metrics_t['f1_score'].append(metrics['f1_score'])
            metrics_t['overall_acc'].append(metrics['overall_acc'])
            metrics_t['average_acc'].append(metrics['average_acc'])

        print(metrics_t)
        #pdb.set_trace()
        return metrics_t
    elif mode == 'global':

        label_test, predictions = labels_predictions_filter_transform(
            label_test, predictions, class_n=class_n)

        print(np.unique(predictions, return_counts=True))
        print(np.unique(label_test, return_counts=True))

        metrics = metrics_get(label_test, predictions)

        return metrics
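
For the 'global' mode of this last variant, all dates are pooled before computing metrics. A hypothetical call follows; the argument values are assumptions, and metrics_get is assumed to return the same keys as in the per-date branch.

# Illustrative call of the last variant with all dates pooled.
metrics_global = experiment_analyze(
    small_classes_ignore=False,
    dataset='cv',
    mode='global',
    debug=0)
print("Global F1 per class: ", metrics_global['f1_score'])
print("Global overall accuracy: ", metrics_global['overall_acc'])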