コード例 #1
0
    def _save(self,grp,trail,root=None):
        """Save a group of files to disc by calling the save() method on each file.

        This internal method is called by walk_groups, in turn called from the
        public save() method. The trail of group keys is used to create a
        directory tree.

        @param grp: A DataFolder group or a DataFile instance
        @param trail: the trail of paths (group keys) used to get here
        @param root: a replacement root directory (defaults to self.directory)
        @return: the saved filename
        """
        # Sanitise every path component and the filename so the directory
        # tree can be created on the target filesystem.
        trail=[self._removeDisallowedFilenameChars(t) for t in trail]
        grp.filename=self._removeDisallowedFilenameChars(grp.filename)
        if root is None:
            root=self.directory

        pth=path.join(root,*trail)
        # BUG FIX: os.makesdirs does not exist -- the correct API is os.makedirs.
        os.makedirs(pth)
        grp.save(path.join(pth,grp.filename))
        return grp.filename
コード例 #2
0
    def _save(self,grp,trail,root=None):
        """Save a group of files to disc by calling the save() method on each file.

        This internal method is called by walk_groups, in turn called from the
        public save() method. The trail of group keys is used to create a
        directory tree.

        Args:
            grp (:py:class:`DataFolder` or :py:class:`Stoner.DataFile`): A group or file to save
            trail (list of strings): the trail of paths used to get here
            root (string or None): a replacement root directory

        Returns:
            The saved filename.
        """
        # Sanitise every path component and the filename so the directory
        # tree can be created on the target filesystem.
        trail=[self._removeDisallowedFilenameChars(t) for t in trail]
        grp.filename=self._removeDisallowedFilenameChars(grp.filename)
        if root is None:
            root=self.directory

        pth=path.join(root,*trail)
        # BUG FIX: os.makesdirs does not exist -- the correct API is os.makedirs.
        os.makedirs(pth)
        grp.save(path.join(pth,grp.filename))
        return grp.filename
コード例 #3
0
if __name__ == '__main__':
    # Training configuration.
    num_classes = 6
    no_of_epochs = 5
    emotion_models_path = '../trained_model/emotion_models/'
    size = [64, 64]
    pathToDataset = "../dataset/ferDataset/"

    # Load the FER dataset and add a trailing channel axis so images are
    # shaped (samples, height, width, 1) as the CNN input layer expects.
    inputs, outputs = prepareData(size, pathToDataset)
    inputs = inputs.reshape(inputs.shape[0], inputs.shape[1], inputs.shape[2],
                            1)

    # Sequential (unshuffled) 80/20 train/test split.
    num_of_samples = len(inputs)
    train_data_length = int(num_of_samples * 0.8)
    x_train, x_test = inputs[0:train_data_length], inputs[train_data_length:]
    y_train, y_test = outputs[0:train_data_length], outputs[train_data_length:]

    model = buildCNNModel(inputs.shape[1:], num_classes, 32, (3, 3), 0.05,
                          (2, 2), 1)
    print(model.summary())
    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    history = model.fit(x_train,
                        y_train,
                        batch_size=32,
                        epochs=no_of_epochs,
                        validation_data=(x_test, y_test))

    # BUG FIX: os.makesdirs does not exist -- the correct API is os.makedirs.
    if not os.path.exists(emotion_models_path):
        os.makedirs(emotion_models_path)
    # Use the final epoch's validation accuracy instead of the hard-coded
    # index 4, so the filename stays correct if no_of_epochs changes.
    model.save(emotion_models_path + 'emotion_recod_%d_acc-%f.model' %
               (no_of_epochs, history.history['val_acc'][-1]))
コード例 #4
0
ファイル: kpca.py プロジェクト: emilleishida/MLSNeSpectra
# read data -- one whitespace-separated row per line, first row is a header
with open(path1, 'r') as op1:
    lin1 = op1.readlines()

data1 = [elem.split() for elem in lin1]

# build matrix: skip the header row, convert every remaining field to float
matrix = np.array([[float(item) for item in line] for line in data1[1:]])

# perform kernel PCA
kpca = KernelPCA(kernel="linear", gamma=par_gamma)
X_kpca = kpca.fit_transform(matrix)

# BUG FIX: os.makesdirs does not exist -- the correct API is os.makedirs.
if not os.path.isdir('plots_' + case + '/'):
    os.makedirs('plots_' + case + '/')

# Scatter plot for each pair of kernel principal components (the current
# 1-element ranges produce only the kPC1-vs-kPC2 plot).
# NOTE: xrange is Python 2 only; range behaves identically for these sizes.
for i in range(0, 1):
    for j in range(1, 2):
        if i < j:
            plt.figure()
            plt.title('gamma = ' + str(par_gamma))
            plt.scatter(X_kpca[:, i], X_kpca[:, j])
            plt.xlabel('kPC' + str(i + 1))
            plt.ylabel('kPC' + str(j + 1))
            plt.savefig('plots_' + case + '/kpca_plot_' + str(i + 1) + '_' + str(j + 1) + '_gamma_' + str(par_gamma) + '_' + case + '.png')

# Scree plot: fraction of variance captured by the first 10 components.
plt.figure()
plt.scatter(range(1, 11), kpca.lambdas_[:10] / sum(kpca.lambdas_))
plt.xlabel('kPCA')
コード例 #5
0
ファイル: runATPR.py プロジェクト: fossabot/atpr
    def process(self, dataSource, progressBar):
        """Run the external ATPR matcher over the extracted text files and
        publish its CSV results as blackboard artifacts in Autopsy.

        Steps: register the custom artifact/attribute types, ensure the input
        directory exists, run the ATPR executable via subprocess, attach the
        CSV report to the case, then create one TSK_ATPR artifact per matching
        row for every file named in the report.

        NOTE: the bare ``except`` blocks are deliberate -- under Jython they
        also catch Java exceptions raised when a type is already registered.
        """

        skCase = Case.getCurrentCase().getSleuthkitCase()

        # Create all the artifact/attribute types needed to show results in
        # the Autopsy interface. Registration fails (and is logged) if the
        # type already exists from a previous run.
        self.log(Level.INFO, "Begin Create New Artifacts")
        try:
            artID_eu = skCase.addArtifactType("TSK_ATPR", "ATPR Results")
        except:
            self.log(Level.INFO, "Artifacts Creation Error, ID ==> ")

        try:
            attID_filePath = skCase.addArtifactAttributeType(
                "TSK_FILE_PATH",
                BlackboardAttribute.TSK_BLACKBOARD_ATTRIBUTE_VALUE_TYPE.STRING,
                "File Path")
        except:
            self.log(Level.INFO, "Attributes Creation Error, File path ==> ")

        try:
            attID_match = skCase.addArtifactAttributeType(
                "TSK_WORD",
                BlackboardAttribute.TSK_BLACKBOARD_ATTRIBUTE_VALUE_TYPE.STRING,
                "Word")
        except:
            self.log(Level.INFO, "Attributes Creation Error, Word ==> ")

        try:
            attTotal_match = skCase.addArtifactAttributeType(
                "TSK_TOTAL",
                BlackboardAttribute.TSK_BLACKBOARD_ATTRIBUTE_VALUE_TYPE.STRING,
                "Total")
        except:
            self.log(Level.INFO, "Attributes Creation Error, Total ==> ")

        try:
            attType_match = skCase.addArtifactAttributeType(
                "TSK_TYPE",
                BlackboardAttribute.TSK_BLACKBOARD_ATTRIBUTE_VALUE_TYPE.STRING,
                "Type")
        except:
            self.log(Level.INFO, "Attributes Creation Error, Type ==> ")

        try:
            attDict_match = skCase.addArtifactAttributeType(
                "TSK_DICT",
                BlackboardAttribute.TSK_BLACKBOARD_ATTRIBUTE_VALUE_TYPE.STRING,
                "Dict")
        except:
            self.log(Level.INFO, "Attributes Creation Error, Dict ==> ")

        # Look the (possibly pre-existing) types back up so we always hold
        # valid handles regardless of whether registration succeeded above.
        artID_eu = skCase.getArtifactTypeID("TSK_ATPR")
        artID_eu_evt = skCase.getArtifactType("TSK_ATPR")
        attID_fp = skCase.getAttributeType("TSK_FILE_PATH")
        attID_match = skCase.getAttributeType("TSK_WORD")
        attTotal_match = skCase.getAttributeType("TSK_TOTAL")
        attType_match = skCase.getAttributeType("TSK_TYPE")
        attDict_match = skCase.getAttributeType("TSK_DICT")

        # we don't know how much work there will be
        progressBar.switchToIndeterminate()

        # Get the folder with the text files.
        # NOTE(review): "\T" is a literal backslash-T here (not an escape) --
        # this module clearly targets Windows paths; verify before porting.
        inputDir = Case.getCurrentCase().getModulesOutputDirAbsPath(
        ) + "\TextFiles"

        try:
            # BUG FIX: os.makesdirs does not exist (always raised
            # AttributeError, so the directory was never created). The
            # correct API is os.makedirs; the except still handles the
            # "already exists" case.
            os.makedirs(inputDir)
            self.log(
                Level.INFO,
                "Find Text Directory must exists for launching the module" +
                inputDir)
        except:
            self.log(Level.INFO, "Find Text Directory exists " + inputDir)

        # We'll save our output to a file in the reports folder, named based on EXE and data source ID
        reportPath = Case.getCurrentCase().getCaseDirectory(
        ) + "\\Reports\\atprResult-" + str(dataSource.getId()) + ".csv"

        logPath = Case.getCurrentCase().getCaseDirectory(
        ) + "\\Reports\\log" + str(dataSource.getId()) + ".txt"
        logHandle = open(logPath, 'w')

        # Run the EXE, saving output to the report
        self.log(Level.INFO, "Running program on data source")
        self.log(
            Level.INFO, self.path_to_exe + " -c match -i " + inputDir +
            " -d " + self.path_to_dirs + " -o " + reportPath)
        subprocess.Popen([
            self.path_to_exe, "-c", "match", "-i", inputDir, "-d",
            self.path_to_dirs, "-o", reportPath
        ],
                         stdout=logHandle).communicate()
        logHandle.close()

        # Add the report to the case, so it shows up in the tree
        Case.getCurrentCase().addReport(reportPath, "Run EXE",
                                        "ATPR result output")

        self.log(Level.INFO, "Report created on " + reportPath)

        self.log(Level.INFO, "Adding elements to Autopsy")

        # Open with codecs for utf-8 parsing. First pass: collect the set of
        # distinct base filenames mentioned in column 0 of the CSV report.
        result = codecs.open(reportPath, encoding='utf-8')

        files_array = set()
        for line in result:
            fields = line.split(';')
            fields_splitted = fields[0].split("\\")
            files_array.add(fields_splitted[len(fields_splitted) - 1])

        fileManager = Case.getCurrentCase().getServices().getFileManager()

        result.close()

        # Second pass: for each referenced file found in the data source,
        # re-scan the report and attach one artifact per matching row.
        for item in files_array:

            files = fileManager.findFiles(dataSource, item)

            for file in files:

                result = codecs.open(reportPath, encoding='utf-8')

                for line in result:
                    fields = line.split(';')

                    if file.getName() in fields[0]:
                        art = file.newArtifact(artID_eu)

                        # CSV columns: path;word;total;type;dict
                        art.addAttributes(((BlackboardAttribute(attID_fp, RunExeIngestModuleFactory.moduleName, fields[0])), \
                        (BlackboardAttribute(attID_match, RunExeIngestModuleFactory.moduleName, fields[1])), \
                        (BlackboardAttribute(attTotal_match, RunExeIngestModuleFactory.moduleName, fields[2])), \
                        (BlackboardAttribute(attType_match, RunExeIngestModuleFactory.moduleName, fields[3])), \
                        (BlackboardAttribute(attDict_match, RunExeIngestModuleFactory.moduleName, fields[4]))))

                # Tell the UI new data is available for this artifact type.
                IngestServices.getInstance().fireModuleDataEvent(
                    ModuleDataEvent(RunExeIngestModuleFactory.moduleName,
                                    artID_eu_evt, None))

                result.close()

        self.log(Level.INFO, "Done")

        return IngestModule.ProcessResult.OK