Example #1
    # Copy the augmentation space's conditions into the joint configuration space.
    cs.add_conditions(aug_space.get_conditions())

    # Attach each candidate network's sub-space so that its hyperparameters are
    # only active when that network is selected by the estimator choice.
    for estimator_id in algorithm_candidates:
        sub_cs = get_model_config_space(estimator_id,
                                        include_estimator=False,
                                        include_aug=False)
        parent_hyperparameter = {
            'parent': estimator_choice,
            'value': estimator_id
        }
        cs.add_configuration_space(estimator_id,
                                   sub_cs,
                                   parent_hyperparameter=parent_hyperparameter)

    return cs


cs = get_pipeline_config_space(['resnet34', 'mobilenet'])
dataset = 'cifar10'
data_dir = 'data/img_datasets/%s/' % dataset
image_data = ImageDataset(data_path=data_dir, train_val_split=True)

# DLEvaluator trains and scores a single configuration on the given dataset.
hpo_evaluator = DLEvaluator(cs.get_default_configuration(),
                            IMG_CLS,
                            scorer=get_metric('acc'),
                            dataset=image_data,
                            device='cuda',
                            image_size=32,
                            seed=1)
# The evaluator is callable; here it evaluates the default configuration.
hpo_evaluator(cs.get_default_configuration())
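
The excerpt above begins inside get_pipeline_config_space, so the creation of cs, aug_space and estimator_choice is not shown. The pattern it relies on is ConfigSpace's conditional sub-spaces; below is a minimal, self-contained sketch using the same 0.x-style ConfigSpace API (the hyperparameter names and the sub-space contents are illustrative assumptions, not MindWare's actual definitions):

from ConfigSpace import ConfigurationSpace
from ConfigSpace.hyperparameters import CategoricalHyperparameter, UniformFloatHyperparameter

# Top-level choice among candidate networks.
cs = ConfigurationSpace()
estimator_choice = CategoricalHyperparameter('estimator', ['resnet34', 'mobilenet'])
cs.add_hyperparameter(estimator_choice)

# A per-network sub-space; its hyperparameters only become active when the
# parent choice takes the matching value.
sub_cs = ConfigurationSpace()
sub_cs.add_hyperparameter(UniformFloatHyperparameter('learning_rate', 1e-4, 1e-1, log=True))
cs.add_configuration_space('resnet34', sub_cs,
                           parent_hyperparameter={'parent': estimator_choice,
                                                  'value': 'resnet34'})
print(cs.get_default_configuration())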
Example #2
from torchvision import transforms

from mindware.datasets.image_dataset import ImageDataset

# Train/validation preprocessing; normalization uses ImageNet statistics.
data_transforms = {
    'train':
    transforms.Compose([
        transforms.RandomResizedCrop(560),
        transforms.RandomCrop(331),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ]),
    'val':
    transforms.Compose([
        transforms.Resize(560),
        transforms.CenterCrop(331),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ]),
}

data_dir = 'data/img_datasets/hymenoptera_data'
# Dataset built with the custom transforms defined above.
image_data = ImageDataset(data_path=data_dir, data_transforms=data_transforms)

print(image_data.train_dataset.classes)
print(image_data.val_dataset.classes)
image_data.load_test_data()
print(image_data.test_dataset.classes)

# Same data, but hold out 30% of the training images for validation.
image_data = ImageDataset(data_path=data_dir,
                          data_transforms=data_transforms,
                          train_val_split=True,
                          val_split_size=0.3)

print(image_data.train_dataset.classes)
print(image_data.train_sampler, image_data.val_sampler)
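
ImageDataset exposes the split through plain PyTorch objects, so batching can use a standard DataLoader. A short sketch, assuming train_dataset is an ordinary torch Dataset and that train_sampler/val_sampler are torch Samplers indexing into it (the usual subset-sampler pattern suggested by the attributes printed above):

from torch.utils.data import DataLoader

train_loader = DataLoader(image_data.train_dataset, batch_size=32,
                          sampler=image_data.train_sampler, num_workers=2)
val_loader = DataLoader(image_data.train_dataset, batch_size=32,
                        sampler=image_data.val_sampler, num_workers=2)

# Inspect one training batch.
for images, labels in train_loader:
    print(images.shape, labels.shape)
    break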
Example #3
import os
import sys
import argparse

sys.path.append(os.getcwd())

from mindware.datasets.image_dataset import ImageDataset
from mindware.estimators import ImageClassifier

parser = argparse.ArgumentParser()
parser.add_argument('--n_jobs', type=int, default=3)
parser.add_argument('--dataset', type=str, default='extremely_small')
args = parser.parse_args()
n_jobs = args.n_jobs
dataset = args.dataset
print('n_jobs is set to %d.' % n_jobs)

data_dir = 'data/img_datasets/%s/' % dataset
image_data = ImageDataset(data_path=data_dir, train_val_split=True)
# Search three candidate networks; combine the resulting models via ensemble selection.
clf = ImageClassifier(
    time_limit=10000000,
    include_algorithms=['mobilenet', 'resnet34', 'efficientnet'],
    evaluation='partial',
    image_size=32,
    ensemble_method='ensemble_selection',
    skip_profile=True,
    n_jobs=n_jobs)
clf.fit(image_data)
# Point the dataset at the test images before predicting.
image_data.set_test_path(data_dir)
print(clf.predict_proba(image_data))
pred = clf.predict(image_data)
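
predict_proba returns class probabilities while predict returns hard labels; their consistency can be checked directly. A small sketch, assuming the sklearn-style convention of an (n_samples, n_classes) probability array:

import numpy as np

proba = clf.predict_proba(image_data)      # assumed shape: (n_samples, n_classes)
labels_from_proba = np.argmax(proba, axis=1)

# Fraction of samples where the argmax of the probabilities matches predict().
print(np.mean(labels_from_proba == pred))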
Example #4
import time
import pickle as pkl

from mindware.datasets.image_dataset import ImageDataset
from mindware.estimators import ImageClassifier

from mindware.components.models.img_classification.resnext import ResNeXtClassifier
from mindware.components.models.img_classification.senet import SENetClassifier
from mindware.components.models.img_classification.nasnet import NASNetClassifier
from mindware.components.models.img_classification.mobilenet import MobileNettClassifier
from mindware.components.models.img_classification.efficientnet import EfficientNetClassifier
from mindware.components.models.img_classification.resnet110_32 import ResNet110_32Classifier
from mindware.components.models.img_classification.densenet190_32 import DenseNet190_32Classifier
from mindware.components.models.img_classification.densenet100_32 import DenseNet100_32Classifier

phase = 'test'

if phase == 'fit':
    # data_dir = 'data/img_datasets/hymenoptera_data/'
    data_dir = 'data/img_datasets/cifar10/'
    # data_dir = 'data/img_datasets/dogs-vs-cats/'
    image_data = ImageDataset(data_path=data_dir, train_val_split=True, image_size=32)
    clf = ImageClassifier(time_limit=3600 * 10,
                          include_algorithms=['resnet44_32'],
                          evaluation='partial',
                          max_epoch=120,
                          skip_profile=True,
                          ensemble_method='ensemble_selection',
                          n_jobs=3)
    clf.fit(image_data, opt_method='whatever')
    image_data.set_test_path(data_dir)
    print(clf.score(image_data, mode='val'))
    print(clf.score(image_data))
    pred = clf.predict(image_data)
    # Persist the predictions with a timestamped filename.
    timestamp = time.time()
    with open('es_output_%s.pkl' % timestamp, 'wb') as f:
        pkl.dump(pred, f)
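
The 'fit' branch above ends by pickling the predictions under a timestamped name; they can be reloaded later, for example in the 'test' phase that this excerpt does not show. A minimal sketch using only the standard library:

import glob
import pickle as pkl

# Load the most recently written prediction dump.
latest = sorted(glob.glob('es_output_*.pkl'))[-1]
with open(latest, 'rb') as f:
    restored_pred = pkl.load(f)
print(latest, len(restored_pred))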
Example #5
import os
import sys
from torchvision import transforms
from sklearn.metrics import accuracy_score

sys.path.append(os.getcwd())

from mindware.datasets.image_dataset import ImageDataset
from mindware.estimators import ImageClassifier
from mindware.components.models.img_classification import add_classifier
from mindware.components.models.img_classification.resnext import ResNeXtClassifier

# Add user-defined classifier
add_classifier(ResNeXtClassifier)

data_dir = 'data/img_datasets/hymenoptera_data/'
image_data = ImageDataset(data_path=data_dir)
clf = ImageClassifier(time_limit=7200,
                      include_algorithms=['ResNeXtClassifier'],
                      ensemble_method='ensemble_selection',
                      config_file_path='tiny_cs.txt')
clf.fit(image_data)
image_data.set_test_path(data_dir)
print(clf.predict_proba(image_data))
print(clf.predict(image_data))
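
accuracy_score is imported above but never used in this excerpt; a natural follow-up is to score the predictions against the test labels. A sketch under two assumptions: load_test_data() populates test_dataset (as in Example #2), and the dataset exposes ImageFolder-style .targets in the same order that predict iterates the images:

image_data.load_test_data()
y_true = image_data.test_dataset.targets   # assumed ImageFolder-style attribute
y_pred = clf.predict(image_data)
print('test accuracy:', accuracy_score(y_true, y_pred))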
Example #6
                     'resnet50', 'resnet101', 'densenet121']
parser.add_argument('--networks', type=str, default=','.join(networks_template))
args = parser.parse_args()
n_jobs = args.n_jobs
dataset = args.dataset
networks = args.networks.split(',')
time_limit = args.time_limit
opt_method = args.opt_method
evaluation = args.eval
max_epoch = args.max_epoch
img_size = args.img_size
print('n_jobs is set to %d.' % n_jobs)
print('networks included', networks)

data_dir = 'data/img_datasets/%s/' % dataset
image_data = ImageDataset(data_path=data_dir, train_val_split=True, image_size=img_size, val_split_size=0.1)
clf = ImageClassifier(time_limit=time_limit,
                      include_algorithms=networks,
                      evaluation=evaluation,
                      ensemble_method=None,
                      skip_profile=True,
                      max_epoch=max_epoch,
                      n_jobs=n_jobs)
image_data.set_test_path(data_dir)

# 'ours' runs the default search procedure; other values are forwarded to fit().
if opt_method == 'ours':
    clf.fit(image_data)
else:
    clf.fit(image_data, opt_method=opt_method)
res = clf.get_runtime_history()
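
This excerpt begins inside the networks_template list, so the imports and the argparse setup are cut off. A hedged sketch of what the missing header plausibly contains, reconstructed from the args.* fields read above (the import paths follow Examples #3 and #5; the default values are placeholders, not the original script's):

import os
import sys
import argparse

sys.path.append(os.getcwd())

from mindware.datasets.image_dataset import ImageDataset
from mindware.estimators import ImageClassifier

parser = argparse.ArgumentParser()
parser.add_argument('--n_jobs', type=int, default=1)            # placeholder default
parser.add_argument('--dataset', type=str, default='cifar10')   # placeholder default
parser.add_argument('--time_limit', type=int, default=3600)     # placeholder default
parser.add_argument('--opt_method', type=str, default='ours')   # placeholder default
parser.add_argument('--eval', type=str, default='partial')      # placeholder default
parser.add_argument('--max_epoch', type=int, default=120)       # placeholder default
parser.add_argument('--img_size', type=int, default=32)         # placeholder default
# The '--networks' argument and networks_template appear in the excerpt above.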