Example #1
def main():
    # Get label encoder
    lb = LabelBinarizer()
    lbenc = lb.fit(utils.get_classes())

    # Get train data
    X_train, y_train, train_filenames = utils.get_train(
        '../input/train', list(lbenc.classes_), img_width, img_height)

    # Create and train model
    model = train(X_train, y_train, epochs=100, batch_size=32)

    print("+++++++++++++++++++++++++++++++++++++++++++")

    # Load model ...
    #model = load_model('../models/'+ 'model2_f0.86/'+ 'model2-64-0.341.h5')

    # Get test data
    X_test, X_test_id = utils.get_test('../input/test', img_width, img_height)
    # Predict on test data
    preds = model.predict(X_test, verbose=1)

    # Create submission
    utils.create_submission(lbenc.inverse_transform(preds),
                            X_test_id,
                            output_path="../submissions/",
                            filename=modelname,
                            isSubmission=True)
    utils.to_csv_ens(lbenc.inverse_transform(preds),
                     preds,
                     X_test_id,
                     utils.get_classes(),
                     output_path="../submissions/",
                     filename=modelname)
    print('Finished.')
Example #2
def eval():
    # Get classes
    lb = LabelBinarizer()
    lbenc = lb.fit(utils.get_classes())

    # Load model
    model = load_model('../models/' + 'model.h5')

    # Get test data
    X_test, X_test_id = utils.get_test('../input/test', img_width, img_height)

    # Predict on test data
    preds = model.predict(X_test, verbose=1)

    # Create ensembling file
    df_csv = utils.to_csv_ens(lbenc.inverse_transform(preds),
                              preds,
                              X_test_id,
                              utils.get_classes(),
                              output_path="../submissions/",
                              filename=modelname)
    # Create submission file
    subm = utils.create_submission(lbenc.inverse_transform(preds),
                                   X_test_id,
                                   output_path="../submissions/",
                                   filename=modelname,
                                   isSubmission=True)
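A minimal sketch (with a hypothetical three-class list) of the LabelBinarizer round-trip that both functions above rely on: fit() learns the class set, transform() one-hot encodes labels, and inverse_transform() maps each prediction row back to the highest-scoring class.

from sklearn.preprocessing import LabelBinarizer
import numpy as np

lb = LabelBinarizer().fit(['cat', 'dog', 'fish'])  # hypothetical class list
onehot = lb.transform(['dog', 'cat'])              # (2, 3) one-hot rows
probs = np.array([[0.1, 0.7, 0.2]])                # e.g. one softmax output row
print(lb.inverse_transform(probs))                 # ['dog'] (argmax per row)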
Example #3
def _main(args):
    os.chdir(os.path.join(os.getcwd(), 'yad2k-em3d'))
    # Parse input arguments
    anchors_path = os.path.expanduser(args.anchors_path)
    classes_path = os.path.expanduser(args.classes_path)
    data_path = os.path.expanduser(args.data_path)
    test_results = os.path.expanduser(args.test_results)
    # Extract anchors, classes, images, and boxes from input files
    anchors = utils.get_anchors(anchors_path)
    classes = utils.get_classes(classes_path)
    images, boxes = utils.get_data(data_path)
    test_results = scipy.io.loadmat(test_results)
    t = test_results['output']
    cv2.imshow("TESTING", images[20])
    cv2.waitKey(0)
    ipdb.set_trace()
    classes = [
        0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
        20, 21, 22, 23, 24, 25
    ]
    class_names = [
        'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N',
        'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z'
    ]
    drawn = utils.draw_boxes(images[0],
                             t[:1],
                             classes,
                             class_names,
                             scores=t[2])
    cv2.imshow('drawn', drawn)
    cv2.waitKey(0)
Example #4
def test():
    """Plot all defined classes in fuzzy.set package"""

    import fuzzy.set

    objects = get_classes(fuzzy.set)

    # add demo sets
    from fuzzy.set.Polygon import Polygon
    objects["Polygon (Demo)"] = Polygon([
            (-1.2,0),
            (-1.2,1),
            (-0.8,0.3),
            (-0.3,0.2),
            (-0.2,0.4),
            (-0.1,0.0),
            (0.0,0.0),
            (0.3,1),
            (0.6,0.5),
            (0.6,0.1),
            (1.3,0.6),
        ])
    for name in sorted(objects):
        if name in ["Set", "Function","Polygon"]:
            continue
        obj = objects[name]

        try:
            plotSet(obj,name)
        except:
            import traceback
            traceback.print_exc()
Example #5
def get_val_loader(args, val_index, batch_size=32, dev_mode=False, val_num=3000):
    classes, stoi = get_classes(args.cls_type, args.start_index, args.end_index)
    _, val_meta = get_train_val_meta(args.cls_type, args.start_index, args.end_index)

    # filter, keep label counts <= args.max_labels
    val_meta = val_meta[val_meta['obj_num'] <= args.max_labels]

    if len(classes) < 7172:
        classes_set = set(classes)
        val_meta['tmp_label_count'] = val_meta['LabelName'].map(lambda x: len(set(x.split()) & classes_set))
        val_meta = val_meta[val_meta['tmp_label_count'] > 0]

    #print(val_meta.shape)
    #print(val_meta['LabelName'].str.split().apply(pd.Series).stack().nunique())
    val_meta = shuffle(val_meta, random_state=1234).iloc[:val_num]

    #print(val_meta.shape)

    if dev_mode:
        val_meta = val_meta.iloc[:10]
    img_dir = settings.TRAIN_IMG_DIR
    
    val_set = ImageDataset(False, val_meta['ImageID'].values.tolist(), img_dir, classes, stoi, val_index, val_meta['LabelName'].values.tolist())

    val_loader = data.DataLoader(val_set, batch_size=batch_size, shuffle=False, num_workers=4, drop_last=False)
    val_loader.num = val_set.num

    return val_loader
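The tmp_label_count filter above is plain pandas; a toy sketch (hypothetical class names) of what it keeps:

import pandas as pd

classes_set = {'cat', 'dog'}                       # hypothetical class subset
meta = pd.DataFrame({'LabelName': ['cat fish', 'fish', 'dog cat']})
meta['tmp_label_count'] = meta['LabelName'].map(
    lambda x: len(set(x.split()) & classes_set))   # labels inside the subset
print(meta[meta['tmp_label_count'] > 0])           # keeps rows 0 and 2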
Example #6
def main(data_file, sep):
    classes_column = "Hogwarts House"
    data, _ = read_data(data_file, sep)
    num_data = get_numerics(data, get_str=False)
    class_list = get_classes(data, classes_column)

    matter = find_most_equal(num_data, class_list)
    print("Matter with the most homogeneous repartition between houses : %s" % matter)

    fig = plt.figure("Histogram")
    # for each matter
    for i, key in enumerate(num_data.keys()):
        ax = fig.add_subplot(4, 4, i + 1)
        ax.set_title(key)
        classes = []
        # for each class
        for c in class_list:
            c_tab = class_tab(num_data[key], class_list[c])
            classes.append(c_tab)
        for cat in classes:
            clean_cat = []
            for elem in cat:
                if not math.isnan(elem):
                    clean_cat.append(elem)
            ax.hist(clean_cat, alpha=0.5)
    fig.legend(class_list.keys(), loc=(0.8, 0))
    fig.tight_layout()
    plt.show(block=True)
Example #7
def interactive(name, params):
    """interactive use: plot complement using given params"""
    import fuzzy.complement
    objects = get_classes(fuzzy.complement)
    try:
        complement = objects[name]
    except KeyError:
        print "%s is unknown." % name
        return

    g = getGnuplot()
    from fuzzy.set.Triangle import Triangle
    set = Triangle()
    set_name = "Triangle"

    if len(params) > 0:
        plotComplement(complement,
                       name,
                       set,
                       set_name,
                       params,
                       gnuplot=g,
                       interactive=True)
    else:
        plotComplement(complement,
                       name,
                       set,
                       set_name,
                       gnuplot=g,
                       interactive=True)

    g.close()
Example #8
def get_train_loader(args, batch_size=32, dev_mode=False, train_shuffle=True):
    classes, stoi = get_classes(args.cls_type, args.start_index, args.end_index)
    train_meta, _ = get_train_val_meta(args.cls_type, args.start_index, args.end_index)

    # filter, keep label counts <= args.max_labels
    train_meta = train_meta[train_meta['obj_num'] <= args.max_labels]
    
    print(train_meta.shape)
    if len(classes) < 7172:
        classes_set = set(classes)
        train_meta['tmp_label_count'] = train_meta['LabelName'].map(lambda x: len(set(x.split()) & classes_set))
        train_meta = train_meta[train_meta['tmp_label_count'] > 0]
        #tuning_labels['LabelName'].map(lambda x: sum([cls_counts[c] for c in x.split()]))
        #print('>>', train_meta.shape)

    # resample training data
    train_img_ids = get_weighted_sample(train_meta, 1024*100)
    df_sampled = train_meta.set_index('ImageID').loc[train_img_ids]

    if dev_mode:
        train_meta = train_meta.iloc[:10]
        train_shuffle = False
    img_dir = settings.TRAIN_IMG_DIR
    
    train_set = ImageDataset(True, train_img_ids, img_dir, classes, stoi, None, df_sampled['LabelName'].values.tolist())
    
    train_loader = data.DataLoader(train_set, batch_size=batch_size, shuffle=train_shuffle, num_workers=4, drop_last=True)#, collate_fn=train_set.collate_fn, drop_last=True)
    train_loader.num = train_set.num

    return train_loader
Example #9
def _main(args):
    # Parse input arguments
    model_path = os.path.expanduser(args.model_path)
    data_path = os.path.expanduser(args.data_path)
    anchors_path = os.path.expanduser(args.anchors_path)
    classes_path = os.path.expanduser(args.classes_path)
    output_path = os.path.expanduser(args.output_path)

    batch = args.batch

    assert model_path.endswith('.h5'), 'model_path must have .h5 extension'
    assert output_path.endswith('.mat'), 'output_path must have .mat extension'

    # Extract anchors and classes from input files
    anchors = utils.get_anchors(anchors_path)
    classes = utils.get_classes(classes_path)
    images, boxes = utils.get_data(data_path)

    # Create model and load weights from file
    model_body, model = create_model(images.shape[1:-1], int(boxes.shape[-1]), anchors, classes)
    model_body.load_weights(model_path)
    model.summary()

    # Pass input data through the network in batches
    output = model_body.predict(images[0:batch, :, :, :, :])
    for i in range(batch, images.shape[0], batch):
        output = np.concatenate((output, model_body.predict(images[i:i + batch, :, :, :, :])))

    # Save output file
    if output_path != '':
        os.makedirs(os.path.dirname(output_path), exist_ok=True)
    scipy.io.savemat(output_path, mdict={'output': output})
    print('Results saved to file: {}'.format(output_path))
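A note on the prediction loop above: np.concatenate inside the loop re-copies the growing output array on every iteration. Collecting per-batch results in a list and concatenating once is an equivalent pattern; a sketch reusing the names from the example:

chunks = [model_body.predict(images[i:i + batch])   # one forward pass per batch
          for i in range(0, images.shape[0], batch)]
output = np.concatenate(chunks)                     # single copy at the end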
Example #10
def load():
    """
    加载模型,回传模型和模型参数
    """

    # Model parameters
    class_names = get_classes(classes_path)
    num_classes = len(class_names)

    anchors = get_anchors(anchors_path)
    num_anchors = len(anchors)

    input_shape = (416, 416)

    train_weights_path = log_dir + '/ep169-loss17.356-val_loss6.844.h5'

    # Build the model structure and load the weights
    image_input = Input(shape=(None, None, 3))

    model = yolo_body(image_input, num_anchors//3, num_classes)
    print('Get YOLOv3 model with {} anchors and {} classes.'.format(num_anchors, num_classes))

    model.load_weights(train_weights_path, by_name=True, skip_mismatch=True)
    print('Load weights {}.'.format(train_weights_path))

    # Parameters to return
    param = [class_names, num_classes, anchors, input_shape]

    return model, param
Example #11
def get_train_val_loaders(args, batch_size=32, dev_mode=False, train_shuffle=True, val_num=4000):
    classes, stoi = get_classes(args.cls_type, args.start_index, args.end_index)
    train_meta, val_meta = get_train_val_meta(args.cls_type, args.start_index, args.end_index)

    #sampler = BalancedSammpler(train_meta, classes, stoi, balanced=args.balanced, min_label_num=500, max_label_num=700)
    #df1 = train_meta.set_index('ImageID')
    #sampled_train_meta = df1.loc[sampler.img_ids]

    train_meta = train_meta[train_meta['obj_num'] <= 10]
    val_meta = val_meta[val_meta['obj_num'] <= 10]

    # resample training data
    train_img_ids = get_weighted_sample(train_meta, 1024*100)
    df_sampled = train_meta.set_index('ImageID').loc[train_img_ids]

    #print(df_sampled.shape)
    if val_num is not None:
        val_meta = val_meta.iloc[:val_num]

    #if dev_mode:
    #    train_meta = train_meta.iloc[:10]
    #    val_meta = val_meta.iloc[:10]
    img_dir = settings.TRAIN_IMG_DIR
    #train_set = ImageDataset(True, sampled_train_meta.index.values.tolist(), img_dir, classes, stoi, sampled_train_meta['LabelName'].values.tolist())
    train_set = ImageDataset(True, train_img_ids, img_dir, classes, stoi, df_sampled['LabelName'].values.tolist())
    
    val_set = ImageDataset(False, val_meta['ImageID'].values.tolist(), img_dir, classes, stoi, val_meta['LabelName'].values.tolist())

    train_loader = data.DataLoader(train_set, batch_size=batch_size, shuffle=train_shuffle, num_workers=4, collate_fn=train_set.collate_fn, drop_last=True)
    train_loader.num = train_set.num

    val_loader = data.DataLoader(val_set, batch_size=batch_size, shuffle=False, num_workers=4, collate_fn=val_set.collate_fn, drop_last=False)
    val_loader.num = val_set.num

    return train_loader, val_loader
Example #12
def get_model2(input_shape=(128, 128, 3)):
    model = Sequential()
    model.add(BatchNormalization(input_shape=input_shape))
    # Convolution + Pooling Layer
    model.add(Conv2D(32, (5, 5), padding='same', activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    # Convolution + Pooling Layer
    model.add(Conv2D(32, (5, 5), padding='same', activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    # Convolution + Pooling Layer
    model.add(Conv2D(64, (5, 5), padding='same', activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    # Convolution + Pooling Layer
    model.add(Conv2D(64, (5, 5), padding='same', activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    # Flatten
    model.add(Flatten())
    # Fully-Connection
    model.add(Dense(64, activation='relu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.4))
    model.add(Dense(32, activation='relu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.2))
    # Output
    model.add(Dense(len(utils.get_classes()), activation='softmax'))

    optimizer = Adam(1e-4)
    model.compile(loss='categorical_crossentropy', optimizer=optimizer, metrics=['accuracy'])
    return model
Example #13
def ensemble_np(args):
    if args.th < 0:
        raise AssertionError('Please specify threshold')

    np_files = args.ensemble_np.split(',')
    if len(np_files) < 1:
        raise AssertionError('no np files')
    outputs = []
    for np_file in np_files:
        if not os.path.exists(np_file):
            raise AssertionError('np file does not exist')
        output = np.load(np_file)
        print(np_file, output.shape)
        outputs.append(output)
    ensemble_outputs = np.mean(outputs, 0)
    preds = (ensemble_outputs > args.th).astype(np.uint8)

    classes, _ = get_classes(args.cls_type, args.start_index, args.end_index)
    label_names = []
    for row in preds:
        label_names.append(get_label_names(row, classes))

    if args.check:
        print(label_names[:10])
        return

    create_submission(args, label_names, args.sub_file)
Example #14
def save_error(label, prediction, file):
    classes = utils.get_classes()
    #classes = train_model_mp.get_classes()
    path = "./errors/%s_classified_as_%s" % (classes[label], classes[prediction])
    if not os.path.isdir(path):
        os.makedirs(path)
    shutil.copyfile(file, path + "/" + os.path.basename(file))
Example #15
def evaluate_model_fast(model_name, path):
    model = keras.models.load_model(model_name)
    images, labels, files = utils.load_images_classes(path)

    predictions = []
    errors = []
    num_classes = len(utils.get_classes())
    confusion_matrix = np.zeros((num_classes, num_classes))

    print("Evaluating model (fast version) on %i instances..." % images.shape[0])
    start = time.time()    
    prediction_probs = model.predict(images)
    print("Elased time: %f seconds" % (time.time() - start))

    for i in range(0,images.shape[0]):
        predicted_class = np.argmax(prediction_probs[i, :])
        predictions.append(predicted_class)
        label = int(labels[i])
        confusion_matrix[label,predicted_class] += 1
        if label == predicted_class:
            errors.append(0)
        else:
            save_error(label, predicted_class, files[i])
            errors.append(1)

    print_statistics(confusion_matrix)
Example #16
def evaluate_model(model_filepath, path):
    model_name = model_filepath[28:-3]
    model = keras.models.load_model(model_filepath)

    images, labels, files = utils.load_images_classes(path)

    predictions = []
    errors = []
    num_classes = len(utils.get_classes())
    confusion_matrix = np.zeros((num_classes, num_classes))
    print("Evaluating model...")
    if os.path.isdir("./errors"):
        shutil.rmtree("./errors")    
    
    for i in range(0,images.shape[0]):
        if i % 1000 == 0:
            print(i)
        prediction_probs = predict_image_set_with_augmentation(images[i:i+1, :, :, :],model)[0]
        predicted_class = np.argmax(prediction_probs)
        predictions.append(predicted_class)
        label = int(labels[i])
        confusion_matrix[label, predicted_class] += 1
        if label == predicted_class:
            errors.append(0)
        else:
            errors.append(1)

    np.save('./Results/Testing_Metrics/Confusion_Matrix/' + model_name + '_cm.npy', confusion_matrix)
    print('./Results/Testing_Metrics/Confusion_Matrix/' + model_name + '_cm.npy')
    print_statistics(confusion_matrix)
Example #17
def predict_softmax(args):
    model, _ = create_model(args)
    model = model.cuda()
    model.eval()
    test_loader = get_test_loader(args, batch_size=args.batch_size, dev_mode=args.dev_mode)

    preds = None
    with torch.no_grad():
        for i, x in enumerate(test_loader):
            x = x.cuda()
            #output = torch.sigmoid(model(x))
            output, _ = model(x)
            output = F.softmax(output, dim=1)
            pred = (output > 0.03).byte()  #  use threshold

            if preds is None:
                preds = pred.cpu()
            else:
                preds = torch.cat([preds, pred.cpu()], 0)
            print('{}/{}'.format(args.batch_size*(i+1), test_loader.num), end='\r')

    classes, _ = get_classes(args.cls_type, args.start_index, args.end_index)
    label_names = []
    preds = preds.numpy()
    print(preds.shape)
    n_classes = 7172
    for row in preds:
        label_names.append(' '.join([classes[i] for i in range(n_classes) if row[i] == 1]))
    if args.dev_mode:
        print(len(label_names))
        print(label_names)

    create_submission(args, label_names, args.sub_file)
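A toy sketch of the thresholding step above: softmax rows sum to 1, so a low cutoff such as 0.03 keeps several plausible labels per image instead of only the argmax (logit values here are illustrative):

import torch
import torch.nn.functional as F

logits = torch.tensor([[2.0, 1.5, -1.0, 0.5]])  # illustrative scores
probs = F.softmax(logits, dim=1)                # roughly [[0.53, 0.32, 0.03, 0.12]]
mask = (probs > 0.03).byte()                    # 1 where a class clears the cutoff
print(mask)                                     # tensor([[1, 1, 0, 1]], dtype=torch.uint8)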
Example #18
def test():
    """Plot all defined classes in fuzzy.set package"""

    import fuzzy.set

    objects = get_classes(fuzzy.set)

    # add demo sets
    from fuzzy.set.Polygon import Polygon
    objects["Polygon (Demo)"] = Polygon([
        (-1.2, 0),
        (-1.2, 1),
        (-0.8, 0.3),
        (-0.3, 0.2),
        (-0.2, 0.4),
        (-0.1, 0.0),
        (0.0, 0.0),
        (0.3, 1),
        (0.6, 0.5),
        (0.6, 0.1),
        (1.3, 0.6),
    ])
    for name in sorted(objects):
        if name in ["Set", "Function", "Polygon"]:
            continue
        obj = objects[name]

        try:
            plotSet(obj, name)
        except:
            import traceback
            traceback.print_exc()
Example #19
def predict(image_path, model_path, top_k, class_names_json):
    '''
    Prints the top predicted class name, label, and probability for a flower image,
    followed by the top k results.

    INPUT: image_path - (str) path to the image.
           model_path - (str) path to the TensorFlow model (h5).
           top_k - (int) number of top results requested.
           class_names_json - (json file) dict mapping class ids to class names.
    OUTPUT: None
    '''

    #Getting mapping file for class index and class names
    class_names = get_classes(class_names_json)

    #Reads Tensorflow model.
    model = tf.keras.models.load_model(
        model_path, custom_objects={'KerasLayer': hub.KerasLayer})

    #open image
    img = Image.open(image_path)

    #put image into array
    image_numpy = np.asarray(img)

    #resize image for processing
    processed_image = process_image(image_numpy)

    #Predict image using tensorflow model
    prob_preds = model.predict(np.expand_dims(processed_image, axis=0))
    prob_preds = prob_preds[0]

    #Get top_k results as tensors.
    values, index = tf.math.top_k(prob_preds, k=top_k)

    #Convert tensors to NumPy for use.
    probs = values.numpy().tolist()
    class_index = index.numpy().tolist()

    #Map class ids to class names.
    pred_label_names = []
    for i in class_index:
        pred_label_names.append(class_names[str(i)])

    #1 Result
    print(
        f"""\n\n Most likely class for {image_path}, with the highest probability listed: \n
          class_id: {class_index[0]} \n
          class_label: {pred_label_names[0]} \n
          probability: {str(round(float(probs[0]) * 100, 2)) + '%'} \n\n\n
          """)

    if top_k > 1:
        print(f"\n Top {top_k} probs", probs)
        print(f"\n Top {top_k} class names", pred_label_names)
        print(f"\n Top {top_k} class ids", class_index)
        print("\n\n")
Example #20
def create_sub_from_raw_csv(args, csv_file):
    classes, _ = get_classes()
    df = pd.read_csv(csv_file)
    df = df[classes]

    outputs = torch.from_numpy(df.values)
    _, preds = outputs.topk(3, 1, True, True)
    preds = preds.numpy()
    create_submission(args, preds, args.sub_file)
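A sketch of the topk call above: for each row it returns the indices of the three largest scores, i.e. the predicted top-3 classes (toy values):

import torch

outputs = torch.tensor([[0.1, 0.9, 0.4, 0.7]])
_, preds = outputs.topk(3, 1, True, True)   # k=3, dim=1, largest=True, sorted=True
print(preds.numpy())                        # [[1 3 2]]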
Example #21
def create_submission(preds, outfile):
    classes, _ = get_classes()
    label_names = []
    for row in preds:
        label_names.append(' '.join([classes[i] for i in row]))

    meta = pd.read_csv(settings.SAMPLE_SUBMISSION)
    meta['word'] = label_names
    meta.to_csv(outfile, index=False)
Example #22
 def get(self):
     user = users.get_current_user()
     classes = utils.get_classes(user)
     lessons, lessons_map = utils.get_lessons(user)
     template = jinja_env.get_template("templates/index.html")
     self.response.out.write(template.render({'lessons': lessons,
                                            'user_email': user.email(),
                                            'user_name': user.nickname(),
                                            'classes': classes,
                                            'logout_url': users.create_logout_url("/")}))
Example #23
def train():
    data_file_path = 'shuf_train_file'
    log_dir = 'logs/'
    classes_path = 'my_class.txt'
    classes_name = get_classes(classes_path)
    num_classes = len(classes_name)
    batch_size = 32

    width = 331
    height = 331

    base_model = NASNetLarge(input_shape=(width, height, 3), weights='imagenet', include_top=False, pooling='avg')

    input_tensor = Input(shape=(None, None, 3))
    x = input_tensor
    # x = Lambda(preprocess_input)(x)
    x = base_model(x)
    x = Dropout(0.5)(x)
    x = Dense(num_classes, activation='softmax')(x)

    model = Model(input_tensor, x)

    logging = TensorBoard(log_dir=log_dir)
    checkpoint = ModelCheckpoint(log_dir + 'ep{epoch:03d}-loss{loss:.3f}-val_loss{val_loss:.3f}.h5',
                                 monitor='val_loss', save_weights_only=True, save_best_only=True, period=3)
    reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=3, verbose=1)
    early_stopping = EarlyStopping(monitor='val_loss', min_delta=0, patience=10, verbose=1)

    val_split = 0.1
    with open(data_file_path, 'r') as f1:
        lines = f1.readlines()
    np.random.seed(10101)
    np.random.shuffle(lines)
    np.random.seed(None)
    num_val = int(len(lines) * val_split)
    num_train = len(lines) - num_val

    if True:
        for i in range(len(model.layers) - 2):
            model.layers[i].trainable = False
        model.summary()
        model.compile(optimizer=Adam(lr=1e-3), loss='categorical_crossentropy', metrics=['acc'])
        model.fit_generator(data_generator_wrapper(lines[:num_train], batch_size=batch_size, input_shape=(width, height),
                                                   num_classes=num_classes),
                            steps_per_epoch=max(1, num_train // batch_size),
                            validation_data=data_generator_wrapper(lines[num_train:], batch_size=batch_size,
                                                                   input_shape=(width, height), num_classes=num_classes),
                            validation_steps=max(1, num_val // batch_size),
                            epochs=10,
                            initial_epoch=0,
                            callbacks=[logging, checkpoint, reduce_lr, early_stopping])
        model.save(log_dir + 'trained_weights_stage_1.h5')

    pass
Example #24
def create_submission(args, preds, outfile):
    classes, _ = get_classes()
    label_names = []
    for row in preds:
        label_names.append(' '.join([classes[i] for i in row]))

    meta = pd.read_csv(settings.SAMPLE_SUBMISSION)
    if args.dev_mode:
        meta = meta.iloc[:len(label_names)]  # for dev mode
    meta['word'] = label_names
    meta.to_csv(outfile, index=False)
Example #25
def get_test_loader(args, batch_size=8, dev_mode=False, tta_index=0):
    img_ids = get_test_ids()
    classes, stoi = get_classes(args.cls_type, args.start_index, args.end_index)

    img_dir = settings.TEST_IMG_DIR
    if dev_mode:
        img_ids = img_ids[:10]
    
    dset = ImageDataset(False, img_ids, img_dir, classes, stoi, tta_index=tta_index)
    dloader = data.DataLoader(dset, batch_size=batch_size, shuffle=False, num_workers=4, collate_fn=dset.collate_fn, drop_last=False)
    dloader.num = dset.num
    return dloader
Example #26
def save_raw_csv(np_file):
    df = pd.read_csv(settings.SAMPLE_SUBMISSION)

    np_dir = os.path.dirname(np_file)
    csv_file_name = os.path.join(np_dir, 'raw.csv')
    outputs = np.load(np_file)
    classes, _ = get_classes()

    for i, c in enumerate(classes):
        df[c] = outputs[:, i]
    col_names = ['key_id', *classes]
    df.to_csv(csv_file_name, index=False, columns=col_names)
Example #27
def test():
    """Show examples for all norm in package fuzzy.norm"""
    objects = get_classes(fuzzy.norm)

    for name in sorted(objects):
        if name in ["Norm","ParametricNorm"]:
            continue
        try:
            norm = objects[name]
            plotNorm(norm,name)
        except:
            import traceback
            traceback.print_exc()
Example #28
def test():
    """Show examples for all complements in package fuzzy.complement"""
    import fuzzy.set
    import fuzzy.complement

    objects = get_classes(fuzzy.set)
    # add demo sets
    #from fuzzy.set.Polygon import Polygon
    #objects["Polygon (Demo)"] = Polygon([
    #        (-1.2,0),
    #        (-1.2,1),
    #        (-0.8,0.3),
    #        (-0.3,0.2),
    #        (-0.2,0.4),
    #        (-0.1,0.0),
    #        (0.0,0.0),
    #        (0.3,1),
    #        (0.6,0.5),
    #        (0.6,0.1),
    #        (1.3,0.6),
    #    ])

    complements = get_classes(fuzzy.complement)
    #print complements

    for name in sorted(objects):
        if name in ["Set", "Function", "Polygon", "Singleton"]:
            continue
        obj = objects[name]

        for name2 in sorted(complements):
            if name2 in ["Base","Parametric"]:
                continue
            try:
                complement = complements[name2]
                plotComplement(complement,name2,obj,name)
            except:
                import traceback
                traceback.print_exc()
Example #29
def test():
    """Show examples for all norm in package fuzzy.norm"""
    objects = get_classes(fuzzy.norm)

    for name in sorted(objects):
        if name in ["Norm", "ParametricNorm"]:
            continue
        try:
            norm = objects[name]
            plotNorm(norm, name)
        except:
            import traceback
            traceback.print_exc()
Example #30
def check_classes():
    tmp = get_classes()
    print(tmp[:10])
    classes = set(tmp)
    print(len(classes))
    bbox = build_bbox_dict()
    v = bbox.values()
    print(len(v))
    for x in v:
        for c, _ in x:
            if c not in classes:
                print(c)
    print('done')
Example #31
def test():
    """Show examples for all complements in package fuzzy.complement"""
    import fuzzy.set
    import fuzzy.complement

    objects = get_classes(fuzzy.set)
    # add demo sets
    #from fuzzy.set.Polygon import Polygon
    #objects["Polygon (Demo)"] = Polygon([
    #        (-1.2,0),
    #        (-1.2,1),
    #        (-0.8,0.3),
    #        (-0.3,0.2),
    #        (-0.2,0.4),
    #        (-0.1,0.0),
    #        (0.0,0.0),
    #        (0.3,1),
    #        (0.6,0.5),
    #        (0.6,0.1),
    #        (1.3,0.6),
    #    ])

    complements = get_classes(fuzzy.complement)
    #print complements

    for name in sorted(objects):
        if name in ["Set", "Function", "Polygon", "Singleton"]:
            continue
        obj = objects[name]

        for name2 in sorted(complements):
            if name2 in ["Base", "Parametric"]:
                continue
            try:
                complement = complements[name2]
                plotComplement(complement, name2, obj, name)
            except:
                import traceback
                traceback.print_exc()
Example #32
    def __init__(self, base_path, image_shape=(320, 240, 3), batch_size=32):
        self.base_path = base_path
        self.training_data_path = os.path.join(base_path, "training_set")
        self.test_data_path = os.path.join(base_path, "testing_set")
        self.classes = get_classes(self.training_data_path)
        self.batch_size = batch_size
        self.image_shape = image_shape

        self.train_ds = tf.keras.preprocessing.image_dataset_from_directory(
            self.training_data_path,
            validation_split=0.2,
            subset="training",
            seed=345,
            image_size=(self.image_shape[0], self.image_shape[1]),
            batch_size=self.batch_size,
            label_mode='categorical',
            class_names=self.classes)

        self.val_ds = tf.keras.preprocessing.image_dataset_from_directory(
            self.training_data_path,
            validation_split=0.2,
            subset="validation",
            seed=345,
            image_size=(self.image_shape[0], self.image_shape[1]),
            batch_size=self.batch_size,
            label_mode='categorical',
            class_names=self.classes)

        self.test_ds = tf.keras.preprocessing.image_dataset_from_directory(
            self.test_data_path,
            seed=123,
            image_size=(self.image_shape[0], self.image_shape[1]),
            batch_size=self.batch_size,
            label_mode='categorical',
            class_names=self.classes)

        normalization_layer = tf.keras.layers.experimental.preprocessing.Rescaling(
            1. / 255)
        self.train_ds = self.train_ds.map(lambda x, y:
                                          (normalization_layer(x), y))
        self.val_ds = self.val_ds.map(lambda x, y:
                                      (normalization_layer(x), y))
        self.test_ds = self.test_ds.map(lambda x, y:
                                        (normalization_layer(x), y))

        # optimizing performances
        autotune = tf.data.experimental.AUTOTUNE
        self.train_ds = self.train_ds.cache().prefetch(buffer_size=autotune)
        self.val_ds = self.val_ds.cache().prefetch(buffer_size=autotune)
        self.test_ds = self.test_ds.cache().prefetch(buffer_size=autotune)
Example #33
 def get(self):
     user = users.get_current_user()
     classes = utils.get_classes(user)
     lessons, lessons_map = utils.get_lessons(user)
     logging.info("Current lesson: " + self.request.get("lesson"))
     lesson_key=ndb.Key(urlsafe=self.request.get("lesson"))
     questions_for_lesson_query = Question.query(ancestor=lesson_key)
     template = jinja_env.get_template("templates/index.html")
     self.response.out.write(template.render({'lessons': lessons,
                                          'user_email': user.email(),
                                          'user_name': user.nickname(),
                                          'classes': classes,
                                          'logout_url': users.create_logout_url("/"),
                                          'questions_for_lesson': questions_for_lesson_query}))
Example #34
def main(data_file, sep):
    classes_column = "Hogwarts House"
    data, _ = read_data(data_file, sep)
    num_data = get_numerics(data, get_str=False)
    class_list = get_classes(data, classes_column)
    fig = plt.figure("Pair plot")
    # for each matter
    for j, key_a in enumerate(num_data.keys()):
        for i, key_b in enumerate(num_data.keys()):
            ax = fig.add_subplot(len(num_data), len(num_data),
                                 len(num_data) * j + i + 1)
            if j == len(num_data) - 1:
                ax.set_xlabel(key_b, rotation=45)
            if i == 0:
                ax.set_ylabel(key_a, rotation=45)
            plt.setp(ax.get_xticklabels(), visible=False)
            plt.setp(ax.get_yticklabels(), visible=False)
            ax.tick_params(axis='both', which='both', length=0)
            if key_b != key_a:
                classes = []
                # for each class
                for c in class_list:
                    c_tab_a = []
                    c_tab_b = []
                    # for each note
                    for idx, val in enumerate(data):
                        if val[classes_column] == c:
                            c_tab_a.append(float(num_data[key_a][idx]))
                            c_tab_b.append(float(num_data[key_b][idx]))
                    classes.append((c_tab_a, c_tab_b))
                for cat in classes:
                    ax.scatter(cat[0], cat[1], alpha=0.5)
            else:
                # for each class
                classes = []
                for c in class_list:
                    c_tab = []
                    # for each note
                    for idx, val in enumerate(num_data[key_a]):
                        if idx in class_list[c]:
                            c_tab.append(float(val))
                    classes.append(c_tab)
                for cat in classes:
                    clean_cat = []
                    for elem in cat:
                        if not math.isnan(elem):
                            clean_cat.append(elem)
                    ax.hist(clean_cat, alpha=0.5)
    plt.show(block=True)
Example #35
def interactive(name):
    """interactive use: plot set of given name"""
    import fuzzy.set
    objects = get_classes(fuzzy.set)
    try:
        set = objects[name]
    except KeyError:
        print("%s is unknown." % name)
        return

    g = getGnuplot()

    plotSet(set, name, gnuplot=g, interactive=True)

    g.close()
Example #36
def ensemble_csvs(csv_files, weights, sub_file):
    print(csv_files)
    classes, _ = get_classes()
    results = []
    for filename in csv_files:
        print(filename)
        df = pd.read_csv(filename)
        df = df[classes]
        results.append(df.values)
    outputs = np.average(results, axis=0, weights=weights)
    outputs = torch.from_numpy(outputs)
    _, preds = outputs.topk(3, 1, True, True)
    preds = preds.numpy()

    create_submission(preds, sub_file)
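A toy check of the np.average ensembling above: the weights argument lets stronger models contribute more to the blended probabilities.

import numpy as np

a = np.array([[0.2, 0.8]])                          # model A's probabilities
b = np.array([[0.6, 0.4]])                          # model B's probabilities
print(np.average([a, b], axis=0, weights=[3, 1]))   # [[0.3 0.7]]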
Example #37
def interactive(name, params):
    """interactive use: plot norm using given params"""
    objects = get_classes(fuzzy.norm)
    try:
        norm = objects[name]
    except KeyError:
        print("%s is unknown." % name)
        return

    g = getGnuplot()

    if len(params) > 0:
        plotNorm(norm, name, params, gnuplot=g, interactive=True)
    else:
        plotNorm(norm, name, gnuplot=g, interactive=True)

    g.close()
Example #38
def interactive(name, params):
    """interactive use: plot complement using given params"""
    import fuzzy.complement
    objects = get_classes(fuzzy.complement)
    try:
        complement = objects[name]
    except KeyError:
        print("%s is unknown." % name)
        return

    g = getGnuplot()
    from fuzzy.set.Triangle import Triangle
    set = Triangle()
    set_name = "Triangle"

    if len(params) > 0:
        plotComplement(complement, name, set, set_name, params, gnuplot=g, interactive=True)
    else:
        plotComplement(complement, name, set, set_name, gnuplot=g, interactive=True)

    g.close()
Example #39
def profile():
    classes = utils.get_classes(session["username"])
    return render_template("profile.html", username=session['username'], classes=classes)
Example #40
def test():
    """Test all found set classes with each defuzzification method
       of the output variable class"""
    import sys
    import fuzzy.set
    import fuzzy.defuzzify
    import fuzzy.OutputVariable
    import fuzzy.Adjective
    import fuzzy.set.Polygon

    # sizes of rows
    row1 = 10
    row2 = 25

    sets = get_classes(fuzzy.set)
    # Add tests of a special sets.
    sets["""~
        _
    _  / \  _
___/ \/   \/ \___
"""] = fuzzy.set.Polygon.Polygon([(-2,0),(-1.5,0.5),(-1.,0.5),(-0.5,0.0),(-0.25,1.),(0.25,1.),(0.5,0.0),(1.,0.5),(1.5,0.5),(2,0)])
    sets["""~
      ___
___  /
   \/
"""] = fuzzy.set.Polygon.Polygon([(-1,0.5),(0.,0.),(1.,1.)])
    sets["""~
__
  \  ___
   \/
"""] = fuzzy.set.Polygon.Polygon([(-1,1.0),(0.,0.),(1.,0.5)])
    defuzzy = get_classes(fuzzy.defuzzify)

    for o in sorted(sets):
        set = sets[o]
        # filter out classes without default values
        if o in ["Set","Function","Polygon"]:
            continue

        print "Defuzzification of %s:" % o
        print "%-*s | %s" % (row1,"method","value")
        print "%s-+-%s" % ("-"*row1,"-"*row2)

        for d in sorted(defuzzy):
            defuzzy_ = defuzzy[d]
            # filter out abstract base classes
            if d in ["Base"]:
                continue

            v = fuzzy.OutputVariable.OutputVariable(defuzzify=defuzzy_)

            try:
                a = fuzzy.Adjective.Adjective(set)
                a.setMembership(1.0)
                v.adjectives["test"] = a
                result = v.getValue()
                if isinstance(result, float):
                    result = "%.3g" % result
                else:
                    result = str(result)
                print("%-*s | %s" % (row1, d, result))
            except:
                print("%-*s |         >>> %s <<<" % (row1, d, sys.exc_info()[1]))
        print()