def shs_test_set_evals(size, method="msd_title", with_duplicates=True):
    """
    :param size: Required prune size of the results
    :param method: (string type) {default:"msd_title"}
        choose the method of experiment available modes are
        ["msd_title", "pre-msd_title", "mxm_lyrics", "title_mxm_lyrics", "pre-title_mxm_lyrics"]
    :param with_duplicates: (boolean) {default:True} include
        or exclude MSD official duplicate tracks from the experiments
    :return:
    """

    es = SearchModule(presets.uri_config)

    if with_duplicates:
        exp = Experiments(es, './data/test_shs.csv', presets.shs_msd)
    else:
        exp = Experiments(es, './data/test_shs.csv', presets.shs_msd_no_dup)

    if method == "msd_title":
        LOGGER.info("\n%s with size %s and duplicates=%s " %
                    (method, size, with_duplicates))
        results = exp.run_song_title_match_task(size=size)

    elif method == "pre-msd_title":
        LOGGER.info("\n%s with size %s and duplicates=%s" %
                    (method, size, with_duplicates))
        results = exp.run_cleaned_song_title_task(size=size)

    elif method == "mxm_lyrics":
        LOGGER.info("\n%s with size %s and duplicates=%s" %
                    (method, size, with_duplicates))
        results = exp.run_mxm_lyrics_search_task(presets.more_like_this,
                                                 size=size)

    elif method == "title_mxm_lyrics":
        LOGGER.info("\n%s with size %s and duplicates=%s" %
                    (method, size, with_duplicates))
        results = exp.run_rerank_title_with_mxm_lyrics_task(size=size,
                                                            with_cleaned=False)

    elif method == "pre-title_mxm_lyrics":
        LOGGER.info("\n%s with size %s and duplicates=%s" %
                    (method, size, with_duplicates))
        results = exp.run_rerank_title_with_mxm_lyrics_task(size=size,
                                                            with_cleaned=True)

    else:
        raise Exception("\nInvalid 'method' parameter for the experiment ! ")

    mean_avg_precision = exp.mean_average_precision(results)
    LOGGER.info("\nMean Average Precision (MAP) = %s" % mean_avg_precision)

    return mean_avg_precision
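A minimal driver sketch for the function above, assuming the module is run directly; the mode names are copied from the docstring, and the prune size of 100 is only illustrative:

if __name__ == "__main__":
    # Sweep every documented experiment mode at a fixed prune size.
    for mode in ["msd_title", "pre-msd_title", "mxm_lyrics",
                 "title_mxm_lyrics", "pre-title_mxm_lyrics"]:
        shs_test_set_evals(size=100, method=mode, with_duplicates=False)

Example #2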
	def test_init(self):
		experiments = Experiments()
		self.assertEqual( experiments.getNumOfExperiments(), 0 )
		self.assertEqual( experiments.getExperiments(), {} )

		try:
			experiments.runAllExperiments()

			self.fail()
		except ValueError as ve:
			self.assertEqual( str(ve), 'Experiments object has no models to run!')

		try:
			experiments.addExperiment('random forest')

			self.fail()
		except ValueError as ve:
			self.assertEqual( str(ve), 'Object must be Experiment object: random forest')

		try:
			experiments.addExperiment( Experiment(1) )

			self.fail()
		except ValueError as ve:
			self.assertEqual( str(ve), 'Experiment name attribute must be string, not <class \'int\'>' )
		self.assertEqual( experiments.getNumOfExperiments(), 0 )

		experiments.addExperiment( Experiment('1') )
		experiments.addExperiment( Experiment('2') )
		experiments.addExperiment( Experiment('3') )
		experiments.addExperiment( Experiment('4') )

		self.assertEqual( experiments.getNumOfExperiments(), 4 )
		self.assertEqual( experiments.getExperimentNames(), ['1', '2', '3', '4'] )
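A minimal sketch of the Experiment/Experiments interface this test implies; the method names and error messages are copied verbatim from the assertions above, everything else is an assumption:

class Experiment:
	def __init__(self, name):
		# Error message matches the assertion in test_init.
		if not isinstance(name, str):
			raise ValueError('Experiment name attribute must be string, not %s' % type(name))
		self.name = name

class Experiments:
	def __init__(self):
		self._experiments = {}  # name -> Experiment, insertion-ordered

	def getNumOfExperiments(self):
		return len(self._experiments)

	def getExperiments(self):
		return self._experiments

	def getExperimentNames(self):
		return list(self._experiments.keys())

	def addExperiment(self, experiment):
		if not isinstance(experiment, Experiment):
			raise ValueError('Object must be Experiment object: %s' % experiment)
		self._experiments[experiment.name] = experiment

	def runAllExperiments(self):
		if not self._experiments:
			raise ValueError('Experiments object has no models to run!')
		# The real class presumably runs each added experiment here.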
Example #3
def main():

    config = get_config_from_json('config.json')
    # create an instance of the model
    model = VAE(config)
    # create experiments instance
    experiments = Experiments(config, model)
    # create trainer instance
    trainer = Trainer(config, model, experiments)
    # train the model
    trainer.train()
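A plausible get_config_from_json helper for the snippet above; this is a hypothetical implementation, and the real project may wrap the dict in an attribute-style config object:

import json

def get_config_from_json(json_file):
    # Parse the JSON config file into a plain dict.
    with open(json_file, 'r') as f:
        return json.load(f)

Example #4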
def main():
    # ----- settings:
    experiment_type = 1
    split_in_cross_validation_again = False
    find_ranks_in_PSA_again = False
    portion_of_test_in_dataset = 0.3
    number_of_folds = 10
    portion_of_sampled_dataset_vector = [
        0.02, 0.06, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9
    ]
    classifiers_for_experiments = [
        'SVM', 'LDA', 'QDA', 'Random Forest', 'Logistic Regression',
        'Gaussian Naive Bayes'
    ]
    path_to_save = './PSA_outputs/'

    # ---- path of dataset:
    path_dataset = './dataset/Breast_cancer_dataset/wdbc_data.txt'
    # ---- read the dataset:
    print(
        '############################## Reading dataset and splitting it to K-fold train and test sets'
    )
    data = pd.read_csv(
        path_dataset, sep=",", header=None
    )  # read text file using pandas dataFrame: https://stackoverflow.com/questions/21546739/load-data-from-txt-with-pandas
    labels_of_classes = ['M', 'B']
    X, y = read_dataset(data=data, labels_of_classes=labels_of_classes)
    experiments = Experiments()
    # # --- saving/loading split dataset in/from folder:
    # if split_in_cross_validation_again:
    #     train_indices_in_folds, test_indices_in_folds, X_train_in_folds, X_test_in_folds, y_train_in_folds, y_test_in_folds = experiments.cross_validation(X=X, y=y, n_splits=number_of_folds, test_size=portion_of_test_in_dataset)
    #     save_variable(train_indices_in_folds, 'train_indices_in_folds', path_to_save=path_to_save)
    #     save_variable(test_indices_in_folds, 'test_indices_in_folds', path_to_save=path_to_save)
    #     save_variable(X_train_in_folds, 'X_train_in_folds', path_to_save=path_to_save)
    #     save_variable(X_test_in_folds, 'X_test_in_folds', path_to_save=path_to_save)
    #     save_variable(y_train_in_folds, 'y_train_in_folds', path_to_save=path_to_save)
    #     save_variable(y_test_in_folds, 'y_test_in_folds', path_to_save=path_to_save)
    # else:
    #     with open(path_to_save + 'train_indices_in_folds.pckl', 'rb') as file:
    #         train_indices_in_folds = pickle.load(file)
    #     with open(path_to_save + 'test_indices_in_folds.pckl', 'rb') as file:
    #         test_indices_in_folds = pickle.load(file)
    #     with open(path_to_save + 'X_train_in_folds.pckl', 'rb') as file:
    #         X_train_in_folds = pickle.load(file)
    #     with open(path_to_save + 'X_test_in_folds.pckl', 'rb') as file:
    #         X_test_in_folds = pickle.load(file)
    #     with open(path_to_save + 'y_train_in_folds.pckl', 'rb') as file:
    #         y_train_in_folds = pickle.load(file)
    #     with open(path_to_save + 'y_test_in_folds.pckl', 'rb') as file:
    #         y_test_in_folds = pickle.load(file)

    # ----- experiments:
    if experiment_type == 1:
        experiments.multi_class_demo()
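A hypothetical load_variable helper, mirroring the save_variable calls in the commented block above, would shrink the loading branch to one call per variable:

import pickle

def load_variable(name, path_to_save='./PSA_outputs/'):
    # Counterpart of save_variable: unpickle a variable stored under path_to_save.
    with open(path_to_save + name + '.pckl', 'rb') as file:
        return pickle.load(file)

Example #5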
    def __init__(self):

        # parameters
        self.global_planner = rospy.get_param(
            'social_experiments/global_planner', '')
        self.local_planner = rospy.get_param(
            'social_experiments/local_planner', '')
        self.world_model_name = rospy.get_param(
            'social_experiments/world_model_name', '')
        self.robot_model_name = rospy.get_param(
            'social_experiments/robot_model_name', '')
        self.max_experiments = rospy.get_param(
            'social_experiments/max_experiments', 100)
        self.path_storage = rospy.get_param('social_experiments/path_storage',
                                            '')
        self.robot_vel = rospy.get_param('social_experiments/robot_vel', 0.3)
        self.space_factor_tolerance = rospy.get_param(
            'social_experiments/space_factor_tolerance', 5)
        self.time_factor_tolerance = rospy.get_param(
            'social_experiments/time_factor_tolerance', 5)
        # self.start_service = rospy.get_param('social_experiments/start_service', '/regions/start')
        # self.goal_service = rospy.get_param('social_experiments/goal_service', '/regions/goal')
        self.checkpoint_services = rospy.get_param(
            'social_experiments/checkpoint_services', '')

        if self.checkpoint_services == '':
            self.checkpoint_services = []
        else:
            self.checkpoint_services = self.checkpoint_services.split(" ")

        # log
        rospy.loginfo('global_planner: ' + self.global_planner)
        rospy.loginfo('local_planner: ' + self.local_planner)
        rospy.loginfo('world_model_name: ' + self.world_model_name)
        rospy.loginfo('robot: ' + self.robot_model_name)
        rospy.loginfo('robot vel: ' + str(self.robot_vel))
        rospy.loginfo('space factor tolerance: ' +
                      str(self.space_factor_tolerance))
        rospy.loginfo('time factor tolerance: ' +
                      str(self.time_factor_tolerance))
        rospy.loginfo('max experiments: ' + str(self.max_experiments))
        # rospy.loginfo('start service: ' + str(self.start_service))
        # rospy.loginfo('goal service: ' + str(self.goal_service))
        # rospy.loginfo('checkpoint services: ' + str(self.checkpoint_services))
        print('')

        # data
        self.data = []

        # init experiments
        self.ex = Experiments(self.global_planner, self.local_planner,
                              self.world_model_name, self.robot_model_name)
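The constructor above only reads its parameters; a hypothetical snippet setting a few of them for a quick test (parameter names come from the code, values are illustrative):

import rospy

rospy.set_param('social_experiments/global_planner', 'navfn/NavfnROS')
rospy.set_param('social_experiments/robot_vel', 0.3)
# checkpoint_services is parsed as a space-separated string of service names.
rospy.set_param('social_experiments/checkpoint_services',
                '/regions/checkpoint_a /regions/checkpoint_b')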
Example #6
def main():
    settings = Settings()
    settings.Initalize_Global_Settings()

    preprocess = Preprocess(settings)
    preprocess.Load_Into_Dataframes()

    analysis = Analysis(preprocess)
    experiments = Experiments(analysis)

    data = analysis.Core(experiments)
    data_experimentals = experiments.Run_Experiments()

    models, best_fit, gals_df = analysis.Mocks_And_Models(experiments)

    plotting = Plotting(preprocess)
    plotting.Plot_Core(data, models, best_fit)
    plotting.Plot_Experiments(data, data_experimentals, models, best_fit)
Example #7
    def __init__(self, setting):

        self.setting = setting
        self.mallet_path = setting['malletpath']
        self.number_of_topics = setting['nooftopics']
        self.number_of_iter = setting['noofiterations']

        self.stack_importer = StackImporter(setting)
        self.lda_importer = LDAImporter(setting)
        self.experiments = Experiments(setting)

        self.model = None
        self.corpus = None
        self.dictionary = None
        self.answer_corpus = None

        directory = self.setting['lda_folder']
        file_name = 'local_lda_model' + self.setting['theme'] + '.gs'
        self.path = ''.join([directory, file_name])
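A hypothetical setting dict for the constructor above; the keys are taken from this constructor (the importers presumably require further keys), and the values are only illustrative:

setting = {
    'malletpath': '/opt/mallet-2.0.8/bin/mallet',
    'nooftopics': 50,
    'noofiterations': 1000,
    'lda_folder': './models/lda/',
    'theme': 'java',
}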
Example #8
    def __init__(self, setting):

        self.setting = setting

        self.idf_values = None

        self.wiki_corpus = None
        self.wiki_dictionary = None
        self.wiki_vectors = []
        self.wiki_processor = WikiPreprocessor(setting)
        self.wiki_importer = WikiImporter(setting, self.wiki_processor)

        self.stack_corpus = None
        self.answer_vectors = {}
        self.question_vectors = {}
        self.user_vectors = {}
        self.user_content = {}
        self.stack_importer = StackImporter(setting)

        self.esa_importer = ESAImporter(setting)
        self.inverted_index = defaultdict(list)
        self.number_of_concepts = 0

        self.experiments = Experiments(setting)
Example #9
import pandas as pd
from model_training import ModelTraining
from preprocessing import Preprocessing
from metrics import Metrics
from data_source import DataSource
from experiments import Experiments
from model_inference import ModelInference

model = Experiments().run_experiment()

ModelTraining().model_training()

ModelInference().predict()
Example #10
File: elwin.py Project: Nordstrom/elwin
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
myapp runs planout as a service
"""
from flask import Flask, jsonify, request
from experiments import Experiments

experiments = Experiments()

# Create the application; Elastic Beanstalk expects the name "application"
app = Flask(__name__)

@app.route("/")
def get_experiments_for_team():
    """Return JSON for team's experiments

    get_experiments_for_team returns experiments JSON of all experiments
    associated with a team

    Args:
        team_name: name of the team (group_id)
        unit: unique identifier for user
Example #11
    if args.plot:
        plot_times_by_batch(args.database)
    else:
        if args.load_database:
            # Pickle files must be opened in binary mode.
            with open(args.database, 'rb') as f:
                exps = pkl.load(f)
        else:
            ## Determine the type of sparsity layer to use
            if args.layer_class == 'HiddenRandomBlockLayer':
                layer_class = HiddenRandomBlockLayer
            else:
                layer_class = HiddenBlockLayer

            ## Create experiments
            exps = Experiments(
                input_dim=784,  # data.train_set_x.shape[-1].eval(),
                num_classes=10)

            # Add descriptions of models
            exps.add_layers_description(
                0, {
                    'n_hids': (25, ),
                    'n_units_per': args.units_per_block,
                    'k_pers': (1, 1),
                    'activations': (T.tanh, None),
                    'layer_classes': [
                        HiddenBlockLayer,
                        HiddenBlockLayer,
                    ],
                })
            exps.add_layers_description(