mirror of
https://github.com/PAMGuard/PAMGuard.git
synced 2024-11-21 22:52:22 +00:00
Updates to the deep learning module structure to get delphinID working.
Also sorted out some build issues with Tethys in the POM.
This commit is contained in:
parent
85ae261a4c
commit
9b77a97a17
@@ -6,8 +6,9 @@
<attribute name="maven.pomderived" value="true"/>
</attributes>
</classpathentry>
<classpathentry kind="con" path="org.eclipse.jdt.launching.JRE_CONTAINER/org.eclipse.jdt.internal.debug.ui.launcher.StandardVMType/JavaSE-21">
<classpathentry kind="con" path="org.eclipse.jdt.launching.JRE_CONTAINER/org.eclipse.jdt.internal.debug.ui.launcher.StandardVMType/jdk-21.0.2.13-hotspot">
<attributes>
<attribute name="module" value="true"/>
<attribute name="maven.pomderived" value="true"/>
</attributes>
</classpathentry>
.gitignore (vendored): 1 change
@@ -108,3 +108,4 @@ settings.xml
.classpath
.classpath
.classpath
.classpath
pom.xml: 100 changes
@@ -4,7 +4,7 @@
<modelVersion>4.0.0</modelVersion>
<groupId>org.pamguard</groupId>
<artifactId>Pamguard</artifactId>
<version>2.02.11b</version>
<version>2.02.11c</version>
<name>Pamguard</name>
<description>Pamguard using Maven to control dependencies</description>
<url>www.pamguard.org</url>
@@ -19,7 +19,7 @@
<javafx.version>21</javafx.version>
<maven.compiler.source>11</maven.compiler.source>
<maven.compiler.target>11</maven.compiler.target>
<!-- Thethys version control -->
<!-- Tethys version control -->
<jaxb.runtime.version>2.4.0-b180830.0438</jaxb.runtime.version>
<jaxb.api.version>2.4.0-b180830.0359</jaxb.api.version>
<jaxb.xjc.version>2.4.0-b180830.0438</jaxb.xjc.version>
@@ -74,59 +74,63 @@
</dependencies>
</plugin>

<!-- Set up javafx properly. -->
<!-- Set up javafx properly. -->
<plugin>
<groupId>org.openjfx</groupId>
<artifactId>javafx-maven-plugin</artifactId>
<version>0.0.8</version>
</plugin>

<!-- Maven Shade plugin - for creating the uberjar / fatjar -->
<!-- see http://maven.apache.org/plugins/maven-shade-plugin/index.html for details -->
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-shade-plugin</artifactId>
<version>3.2.1</version>
<configuration>
<transformers>
<transformer
implementation="org.apache.maven.plugins.shade.resource.ServicesResourceTransformer" />
</transformers>
</configuration>
<executions>
<execution>
<phase>package</phase>
<goals>
<goal>shade</goal>
</goals>
<configuration>
<transformers>
<transformer implementation="org.apache.maven.plugins.shade.resource.ManifestResourceTransformer">
<manifestEntries>
<Main-Class>pamguard.Pamguard</Main-Class>
<Class-Path>.</Class-Path> <!-- this is from the orig ant build file -->
<SplashScreen-Image>Resources/pgBlankSplash.png</SplashScreen-Image> <!-- this is from the orig ant build file -->
</manifestEntries>
</transformer>
<transformer implementation="org.apache.maven.plugins.shade.resource.ServicesResourceTransformer" />
</transformers>
<filters>
<filter>
<artifact>*:*</artifact>
<excludes>
<exclude>META-INF/*.SF</exclude> <!-- get rid of manifests from library jars - also done in orig ant build file -->
<exclude>META-INF/*.DSA</exclude>
<exclude>META-INF/*.RSA</exclude>
</excludes>
</filter>
</filters>
</configuration>
</execution>
</executions>
</plugin>
<!-- Maven Shade plugin - for creating the uberjar / fatjar -->
<!-- see http://maven.apache.org/plugins/maven-shade-plugin/index.html for details -->
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-shade-plugin</artifactId>
<version>3.2.1</version>
<configuration>
<transformers>
<transformer implementation="org.apache.maven.plugins.shade.resource.ServicesResourceTransformer" />
</transformers>
<filters>
<!-- Moved these filters up to here from where they were int <executions><execution><configurtion>
based on advice at https://stackoverflow.com/questions/34738653/maven-shade-plugin-does-not-exclude-the-manifest-signature-files
and the are now working -->
<filter>
<artifact>*:*</artifact>
<excludes>
<exclude>META-INF/*.SF</exclude> <!-- get rid of manifests from library jars - also done in orig ant build file -->
<exclude>META-INF/*.DSA</exclude>
<exclude>META-INF/*.RSA</exclude>
</excludes>
</filter>
</filters>
</configuration>

<executions>
<execution>
<phase>package</phase>
<goals>
<goal>shade</goal>
</goals>
<configuration>
<transformers>
<transformer implementation="org.apache.maven.plugins.shade.resource.ManifestResourceTransformer">
<manifestEntries>
<Main-Class>pamguard.Pamguard</Main-Class>
<Class-Path>.</Class-Path> <!-- this is from the orig ant build file -->
<SplashScreen-Image>Resources/pgBlankSplash.png</SplashScreen-Image> <!-- this is from the orig ant build file -->
</manifestEntries>
</transformer>
<transformer implementation="org.apache.maven.plugins.shade.resource.ServicesResourceTransformer" />
</transformers>
</configuration>
</execution>
</executions>
</plugin>

<!-- The Maven-JDEPS plugin, to analyze necessary JDK dependencies. See
this site for details: https://maven.apache.org/plugins/maven-jdeps-plugin/index.html -->

<!-- The Maven-JDEPS plugin, to analyze necessary JDK dependencies. See
this site for details: https://maven.apache.org/plugins/maven-jdeps-plugin/index.html -->
<plugin>
<groupId>com.github.marschall</groupId>
<artifactId>jdeps-maven-plugin</artifactId>
@@ -59,7 +59,7 @@ public class WavFileInputStream extends AudioInputStream {
//wav files (this is in fact tha standard emthod for calculated blockalign)
int blockAlign = wavHeader.getNChannels() * (wavHeader.getBitsPerSample() / 8);

System.out.println("NFRAMES: " + nFrames + " "+ wavHeader.getDataSize() + " " + wavHeader.getBlockAlign() + " "+blockAlign );
// System.out.println("NFRAMES: " + nFrames + " "+ wavHeader.getDataSize() + " " + wavHeader.getBlockAlign() + " "+blockAlign );

Encoding encoding = getEncoding(wavHeader.getFmtTag());
if (encoding == null) {
@@ -9,10 +9,10 @@ import org.jamdev.jdl4pam.transforms.DLTransfromParams;
import org.jamdev.jdl4pam.transforms.SimpleTransform;
import org.jamdev.jdl4pam.transforms.SimpleTransformParams;

import PamguardMVC.PamDataUnit;
import rawDeepLearningClassifier.DLControl;
import rawDeepLearningClassifier.DLStatus;
import rawDeepLearningClassifier.layoutFX.DLCLassiferModelUI;
import rawDeepLearningClassifier.segmenter.SegmenterProcess.GroupedRawData;
import warnings.PamWarning;

/**
@@ -32,7 +32,7 @@ public interface DLClassiferModel {
*
* @return the deep learning model.
*/
public ArrayList<? extends PredictionResult> runModel(ArrayList<GroupedRawData> rawDataUnit);
public ArrayList<? extends PredictionResult> runModel(ArrayList<? extends PamDataUnit> rawDataUnit);

/**
* Prepare the model. This is called on PAMGuard start up.
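The substantive change in this interface is that runModel() now takes ArrayList<? extends PamDataUnit> rather than ArrayList<GroupedRawData>, so a classifier can be handed segments that are not raw audio (delphinID's whistle contours, for example). Below is a minimal sketch, not part of this commit, of the kind of filtering a raw-sound model now has to do itself; the class and method names are invented for illustration.

import java.util.ArrayList;

import PamguardMVC.PamDataUnit;
import rawDeepLearningClassifier.segmenter.GroupedRawData;

public class RawSegmentFilter {

	/**
	 * Keep only the units that actually carry raw audio; other unit types
	 * (e.g. whistle contours) can now legally arrive at runModel(...).
	 */
	public static ArrayList<GroupedRawData> rawOnly(ArrayList<? extends PamDataUnit> units) {
		ArrayList<GroupedRawData> raw = new ArrayList<>();
		for (PamDataUnit unit : units) {
			if (unit instanceof GroupedRawData) {
				raw.add((GroupedRawData) unit);
			}
		}
		return raw;
	}
}

The same instanceof guard appears further down in StandardClassifierModel.newResult(), which only forwards results whose source unit really is a GroupedRawData.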
@@ -18,8 +18,8 @@ import rawDeepLearningClassifier.layoutFX.DLDetectionGraphics;
import rawDeepLearningClassifier.layoutFX.DLGraphics;
import rawDeepLearningClassifier.logging.DLAnnotation;
import rawDeepLearningClassifier.logging.DLAnnotationType;
import rawDeepLearningClassifier.segmenter.GroupedRawData;
import rawDeepLearningClassifier.segmenter.SegmenterDataBlock;
import rawDeepLearningClassifier.segmenter.SegmenterProcess.GroupedRawData;

/**
* The deep learning classification process. This takes a segment of raw data from the segmenter.
@@ -5,14 +5,14 @@ import java.util.Collections;
import java.util.List;
import java.util.concurrent.atomic.AtomicBoolean;

import rawDeepLearningClassifier.dlClassification.animalSpot.SoundSpotResult;
import PamguardMVC.PamDataUnit;
import rawDeepLearningClassifier.dlClassification.genericModel.DLModelWorker;
import rawDeepLearningClassifier.dlClassification.genericModel.GenericPrediction;
import rawDeepLearningClassifier.segmenter.SegmenterProcess.GroupedRawData;
import rawDeepLearningClassifier.segmenter.GroupedRawData;

/**
* Creates a que for grouped data units for classiifcation.
* @author au671271
* @author Jamie Macaulay
*
*/
public abstract class DLTaskThread extends Thread {
@@ -28,7 +28,7 @@ public abstract class DLTaskThread extends Thread {
/**
* Holds a list of segmented raw data units which need to be classified.
*/
private List<ArrayList<GroupedRawData>> queue = Collections.synchronizedList(new ArrayList<ArrayList<GroupedRawData>>());
private List<ArrayList<? extends PamDataUnit>> queue = Collections.synchronizedList(new ArrayList<ArrayList<? extends PamDataUnit>>());


public DLTaskThread(DLModelWorker soundSpotWorker) {
@@ -50,7 +50,7 @@ public abstract class DLTaskThread extends Thread {
try {
if (queue.size()>0) {
System.out.println("DL TASK THREAD: " + "The queue size is " + queue.size());
ArrayList<GroupedRawData> groupedRawData = queue.remove(0);
ArrayList<? extends PamDataUnit> groupedRawData = queue.remove(0);

ArrayList<GenericPrediction> modelResult = dlModelWorker.runModel(groupedRawData,
groupedRawData.get(0).getParentDataBlock().getSampleRate(), 0); //TODO channel?
@@ -79,17 +79,17 @@ public abstract class DLTaskThread extends Thread {
* @param soundSpotResult - the new result.
* @param groupedRawData - the grouped data unit.
*/
public abstract void newDLResult(GenericPrediction soundSpotResult, GroupedRawData groupedRawData);
public abstract void newDLResult(GenericPrediction soundSpotResult, PamDataUnit groupedRawData);

/**
* Get the grouped data queue
* @return
*/
public List<ArrayList<GroupedRawData>> getQueue() {
public List<ArrayList<? extends PamDataUnit>> getQueue() {
return queue;
}

public void setQueue(List<ArrayList<GroupedRawData>> queue) {
public void setQueue(List<ArrayList<? extends PamDataUnit>> queue) {
this.queue = queue;
}
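The queue in DLTaskThread is now typed List<ArrayList<? extends PamDataUnit>> instead of List<ArrayList<GroupedRawData>>. A small compile-check sketch, not part of this commit and with an invented class name, showing that the old raw-audio payload still fits the widened type:

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

import PamguardMVC.PamDataUnit;
import rawDeepLearningClassifier.segmenter.GroupedRawData;

public class QueueTypeSketch {
	public static void main(String[] args) {
		List<ArrayList<? extends PamDataUnit>> queue =
				Collections.synchronizedList(new ArrayList<ArrayList<? extends PamDataUnit>>());

		ArrayList<GroupedRawData> rawSegment = new ArrayList<>();   // the old raw-sound payload
		queue.add(rawSegment);                                      // still accepted by the new queue type

		ArrayList<? extends PamDataUnit> next = queue.remove(0);    // polled exactly as in DLTaskThread.run()
		System.out.println("batches queued: " + queue.size() + ", polled batch size: " + next.size());
	}
}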
@@ -12,6 +12,7 @@ import PamController.PamSettings;
import PamDetection.RawDataUnit;
import PamUtils.PamArrayUtils;
import PamUtils.PamCalendar;
import PamguardMVC.PamDataUnit;
import javafx.stage.FileChooser.ExtensionFilter;
import rawDeepLearningClassifier.DLControl;
import rawDeepLearningClassifier.DLStatus;
@@ -20,13 +21,13 @@ import rawDeepLearningClassifier.dlClassification.genericModel.DLModelWorker;
import rawDeepLearningClassifier.dlClassification.genericModel.GenericDLClassifier;
import rawDeepLearningClassifier.dlClassification.genericModel.GenericPrediction;
import rawDeepLearningClassifier.layoutFX.DLSettingsPane;
import rawDeepLearningClassifier.segmenter.SegmenterProcess.GroupedRawData;
import rawDeepLearningClassifier.segmenter.GroupedRawData;
import warnings.PamWarning;
import warnings.WarningSystem;

/**
* A useful abstract class for standard models which are a file or URL that is loaded, have a UI and
* utilise PAMSettings to save settings state.
* utilise PAMSettings to save settings state. These models only accept raw sound data segments.
*/
public abstract class StandardClassifierModel implements DLClassiferModel, PamSettings {

@@ -56,12 +57,15 @@ public abstract class StandardClassifierModel implements DLClassiferModel, PamSettings {

@Override
public ArrayList<? extends PredictionResult> runModel(ArrayList<GroupedRawData> groupedRawData) {
@SuppressWarnings("rawtypes")
public ArrayList<? extends PredictionResult> runModel( ArrayList<? extends PamDataUnit> groupedRawData) {
if (getDLWorker().isModelNull()) return null;

// System.out.println("SoundSpotClassifier: PamCalendar.isSoundFile(): "
// + PamCalendar.isSoundFile() + " " + (PamCalendar.isSoundFile() && !forceQueue));

/**
* If a sound file is being analysed then Ketos can go as slow as it wants. if used in real time
* then there is a buffer with a maximum queue size.
@@ -165,7 +169,7 @@ public abstract class StandardClassifierModel implements DLClassiferModel, PamSettings {
return DLStatus.NO_MODEL_LOADED;
}

// if continous data is selected and all classes are false then this is a
// if continuous data is selected and all classes are false then this is a
// potential mistake...
if (dlControl.getSettingsPane().getSelectedParentDataBlock().getUnitClass() == RawDataUnit.class
&& (getDLParams().binaryClassification==null || PamArrayUtils.isAllFalse(getDLParams().binaryClassification))){
@@ -200,7 +204,7 @@ public abstract class StandardClassifierModel implements DLClassiferModel, PamSettings {
}

@Override
public void newDLResult(GenericPrediction soundSpotResult, GroupedRawData groupedRawData) {
public void newDLResult(GenericPrediction soundSpotResult, PamDataUnit groupedRawData) {
soundSpotResult.setClassNameID(GenericDLClassifier.getClassNameIDs(getDLParams()));
soundSpotResult.setBinaryClassification(GenericDLClassifier.isBinaryResult(soundSpotResult, getDLParams()));
newResult(soundSpotResult, groupedRawData);
@@ -220,8 +224,10 @@ public abstract class StandardClassifierModel implements DLClassiferModel, PamSettings {
* @param modelResult - the model result;
* @param groupedRawData - the grouped raw data.
*/
protected void newResult(GenericPrediction modelResult, GroupedRawData groupedRawData) {
this.dlControl.getDLClassifyProcess().newModelResult(modelResult, groupedRawData);
protected void newResult(GenericPrediction modelResult, PamDataUnit groupedRawData) {
if (groupedRawData instanceof GroupedRawData) {
this.dlControl.getDLClassifyProcess().newModelResult(modelResult, (GroupedRawData) groupedRawData);
}
}
//
//	@Override
@@ -1,8 +1,24 @@
package rawDeepLearningClassifier.dlClassification.delphinID;

import java.io.Serializable;
import java.net.URI;
import java.util.ArrayList;

import PamController.PamControlledUnitSettings;
import PamController.PamSettings;
import PamguardMVC.PamDataUnit;
import rawDeepLearningClassifier.DLControl;
import rawDeepLearningClassifier.DLStatus;
import rawDeepLearningClassifier.dlClassification.DLClassName;
import rawDeepLearningClassifier.dlClassification.DLClassiferModel;
import rawDeepLearningClassifier.dlClassification.PredictionResult;
import rawDeepLearningClassifier.dlClassification.StandardClassifierModel;
import rawDeepLearningClassifier.dlClassification.animalSpot.StandardModelParams;
import rawDeepLearningClassifier.dlClassification.archiveModel.ArchiveModelClassifier;
import rawDeepLearningClassifier.dlClassification.archiveModel.ArchiveModelWorker;
import rawDeepLearningClassifier.dlClassification.genericModel.DLModelWorker;
import rawDeepLearningClassifier.dlClassification.genericModel.GenericPrediction;
import rawDeepLearningClassifier.layoutFX.DLCLassiferModelUI;

/**
* A classifier based on the delphinID method which uses whistle contours to predict
@@ -11,31 +27,77 @@ import rawDeepLearningClassifier.dlClassification.archiveModel.ArchiveModelWorker;
* @author Jamie Macaulay
*
*/
public class DelphinIDClassifier extends ArchiveModelClassifier{

private static final String MODEL_NAME = "delphinID";

/**
* Reference to the worker
*/
private DelphinIDWorker delphinIDWorker;
public class DelphinIDClassifier extends StandardClassifierModel {

public DelphinIDClassifier(DLControl dlControl) {
super(dlControl);
}

@Override
public String getName() {
return MODEL_NAME;
}

@Override
public ArchiveModelWorker getModelWorker() {
if (delphinIDWorker==null) {
delphinIDWorker= new DelphinIDWorker();
}
return delphinIDWorker;
// TODO Auto-generated constructor stub
}

@Override
public boolean isModelType(URI model) {
// TODO Auto-generated method stub
return false;
}

@Override
public String getName() {
// TODO Auto-generated method stub
return null;
}

@Override
public DLCLassiferModelUI getModelUI() {
// TODO Auto-generated method stub
return null;
}

@Override
public Serializable getDLModelSettings() {
// TODO Auto-generated method stub
return null;
}

@Override
public String getUnitType() {
// TODO Auto-generated method stub
return null;
}

@Override
public Serializable getSettingsReference() {
// TODO Auto-generated method stub
return null;
}

@Override
public long getSettingsVersion() {
// TODO Auto-generated method stub
return 0;
}

@Override
public boolean restoreSettings(PamControlledUnitSettings pamControlledUnitSettings) {
// TODO Auto-generated method stub
return false;
}

@Override
public String getUnitName() {
// TODO Auto-generated method stub
return null;
}

@Override
public DLModelWorker<GenericPrediction> getDLWorker() {
// TODO Auto-generated method stub
return null;
}

@Override
public StandardModelParams getDLParams() {
// TODO Auto-generated method stub
return null;
}

}
}
@@ -0,0 +1,12 @@
package rawDeepLearningClassifier.dlClassification.delphinID;

import rawDeepLearningClassifier.dlClassification.genericModel.GenericPrediction;

public class DelphinIDPrediction extends GenericPrediction{

public DelphinIDPrediction(float[] prob) {
super(prob);
// TODO Auto-generated constructor stub
}

}
@@ -1,8 +1,39 @@
package rawDeepLearningClassifier.dlClassification.delphinID;

import rawDeepLearningClassifier.dlClassification.archiveModel.ArchiveModelWorker;
import rawDeepLearningClassifier.DLControl;
import rawDeepLearningClassifier.dlClassification.animalSpot.StandardModelParams;
import rawDeepLearningClassifier.dlClassification.genericModel.DLModelWorker;

public class DelphinIDWorker extends ArchiveModelWorker{
public class DelphinIDWorker extends DLModelWorker<DelphinIDPrediction>{

@Override
public float[] runModel(float[][][] transformedDataStack) {
// TODO Auto-generated method stub
return null;
}

@Override
public boolean isModelNull() {
// TODO Auto-generated method stub
return false;
}

@Override
public DelphinIDPrediction makeModelResult(float[] prob, double time) {
// TODO Auto-generated method stub
return null;
}

@Override
public void prepModel(StandardModelParams delphinIDParams, DLControl dlControl) {
// TODO Auto-generated method stub
}

@Override
public void closeModel() {
// TODO Auto-generated method stub

}
@@ -4,14 +4,14 @@ import java.io.Serializable;
import java.net.URI;
import java.util.ArrayList;

import PamguardMVC.PamDataUnit;
import rawDeepLearningClassifier.DLControl;
import rawDeepLearningClassifier.DLStatus;
import rawDeepLearningClassifier.dlClassification.DLClassName;
import rawDeepLearningClassifier.dlClassification.DLClassiferModel;
import rawDeepLearningClassifier.dlClassification.PredictionResult;
import rawDeepLearningClassifier.layoutFX.DLCLassiferModelUI;
import rawDeepLearningClassifier.segmenter.SegmenterProcess.GroupedRawData;
import warnings.PamWarning;
import rawDeepLearningClassifier.segmenter.GroupedRawData;

/**
* Classifier which returns a random results. Used for debugging and testing.
@@ -62,8 +62,7 @@ public class DummyClassifier implements DLClassiferModel{
return null;
}

@Override
public ArrayList<PredictionResult> runModel(ArrayList<GroupedRawData> rawDataUnit) {
public ArrayList<? extends PredictionResult> runModel(ArrayList<? extends PamDataUnit> rawDataUnit) {
ArrayList<PredictionResult> modelResults = new ArrayList<PredictionResult>();

for (int i=0; i<rawDataUnit.size(); i++) {
@@ -11,16 +11,16 @@ import org.jamdev.jdl4pam.transforms.DLTransformsFactory;
import org.jamdev.jdl4pam.utils.DLUtils;
import org.jamdev.jpamutils.wavFiles.AudioData;

import PamguardMVC.PamDataUnit;
import rawDeepLearningClassifier.DLControl;
import rawDeepLearningClassifier.dlClassification.animalSpot.StandardModelParams;
import rawDeepLearningClassifier.segmenter.SegmenterProcess.GroupedRawData;
import rawDeepLearningClassifier.segmenter.GroupedRawData;


/**
*
* Runs the deep learning model and performs feature extraction.
* <p>
*
*
* @author Jamie Macaulay
*
@@ -41,71 +41,83 @@ public abstract class DLModelWorker<T> {
* True to enable normalisation of results using softmax;
*/
private boolean enableSoftMax = true;


/**
* Convert a list of data units to a stack if images.
* @param dataUnits - the data units.
* @param sampleRate - the sample rate
* @param iChan - the channels
* @return a stack of images for input into a deep learning model.
*/
public float[][][] dataUnits2ModelInput(ArrayList<? extends PamDataUnit> dataUnits, float sampleRate, int iChan){

@SuppressWarnings("unchecked")
ArrayList<GroupedRawData> rawDataUnits = ( ArrayList<GroupedRawData>) dataUnits;

//the number of chunks.
int numChunks = rawDataUnits.size();

//data input into the model - a stack of spectrogram images.
float[][][] transformedDataStack = new float[numChunks][][];

//generate the spectrogram stack.
AudioData soundData;
double[][] transformedData2; //spec data
double[] transformedData1; //waveform data
for (int j=0; j<numChunks; j++) {

soundData = new AudioData(rawDataUnits.get(j).getRawData()[iChan], sampleRate);

// for (int i=0; i<modelTransforms.size(); i++) {
// System.out.println("Transfrom type: " + modelTransforms.get(i).getDLTransformType());
// }
//set the sound in the first transform.
((WaveTransform) modelTransforms.get(0)).setWaveData(soundData);

// System.out.println("Model transforms:no. " + modelTransforms.size()+ " input sounds len: " + soundData.getLengthInSeconds()
// + " Decimate Params: " + ((WaveTransform) modelTransforms.get(0)).getParams()[0] + "max amplitude sound: " + PamArrayUtils.max(soundData.samples));

DLTransform transform = modelTransforms.get(0);
for (int i =0; i<modelTransforms.size(); i++) {
transform = modelTransforms.get(i).transformData(transform);
// //TEMP
// if (transform instanceof FreqTransform) {
// transformedData = ((FreqTransform) transform).getSpecTransfrom().getTransformedData();
// System.out.println("DLModelWorker: transform : " + modelTransforms.get(i).getDLTransformType() + " "+ i + transformedData.length + " " + transformedData[0].length + " minmax: " + PamArrayUtils.minmax(transformedData)[0] + " " + PamArrayUtils.minmax(transformedData)[1]);
// }
}

if (transform instanceof FreqTransform) {
//add a spectrogram to the stacl
transformedData2 = ((FreqTransform) transform).getSpecTransfrom().getTransformedData();
transformedDataStack[j] = DLUtils.toFloatArray(transformedData2);
}
else {
//add wavefrom to the stack = we make the 2nd dimesnion 1.
transformedData1 = ((WaveTransform) transform).getWaveData().getScaledSampleAmplitudes();
transformedDataStack[j] = new float[1][transformedData1.length];
transformedDataStack[j][0] = DLUtils.toFloatArray(transformedData1);
}
}
return transformedDataStack;
}


/**
* Run the initial data feature extraction and the model
* @param rawDataUnit - the raw data unit.
* @param rawDataUnit - the raw data unit. This is a stack of data units to be classified either together or seperately.
* @param iChan - the channel to run the data on.
* @return the model to run.
*/
public synchronized ArrayList<T> runModel(ArrayList<GroupedRawData> rawDataUnits, float sampleRate, int iChan) {
public synchronized ArrayList<T> runModel(ArrayList<? extends PamDataUnit> dataUnits, float sampleRate, int iChan) {

try {
//the number of chunks.
int numChunks = rawDataUnits.size();

//PamCalendar.isSoundFile();
//create an audio data object from the raw data chunk
long timeStart = System.nanoTime();

//data input into the model - a stack of spectrogram images.
float[][][] transformedDataStack = new float[numChunks][][];

//generate the spectrogram stack.
AudioData soundData;
double[][] transformedData2; //spec data
double[] transformedData1; //waveform data
for (int j=0; j<numChunks; j++) {

soundData = new AudioData(rawDataUnits.get(j).getRawData()[iChan], sampleRate);

// for (int i=0; i<modelTransforms.size(); i++) {
// System.out.println("Transfrom type: " + modelTransforms.get(i).getDLTransformType());
// }

//set the sound in the first transform.
((WaveTransform) modelTransforms.get(0)).setWaveData(soundData);

// System.out.println("Model transforms:no. " + modelTransforms.size()+ " input sounds len: " + soundData.getLengthInSeconds()
// + " Decimate Params: " + ((WaveTransform) modelTransforms.get(0)).getParams()[0] + "max amplitude sound: " + PamArrayUtils.max(soundData.samples));

DLTransform transform = modelTransforms.get(0);
for (int i =0; i<modelTransforms.size(); i++) {
transform = modelTransforms.get(i).transformData(transform);
// //TEMP
// if (transform instanceof FreqTransform) {
// transformedData = ((FreqTransform) transform).getSpecTransfrom().getTransformedData();
// System.out.println("DLModelWorker: transform : " + modelTransforms.get(i).getDLTransformType() + " "+ i + transformedData.length + " " + transformedData[0].length + " minmax: " + PamArrayUtils.minmax(transformedData)[0] + " " + PamArrayUtils.minmax(transformedData)[1]);
// }
}

if (transform instanceof FreqTransform) {
//add a spectrogram to the stacl
transformedData2 = ((FreqTransform) transform).getSpecTransfrom().getTransformedData();
transformedDataStack[j] = DLUtils.toFloatArray(transformedData2);

}
else {
//add wavefrom to the stack = we make the 2nd dimesnion 1.
transformedData1 = ((WaveTransform) transform).getWaveData().getScaledSampleAmplitudes();
transformedDataStack[j] = new float[1][transformedData1.length];
transformedDataStack[j][0] = DLUtils.toFloatArray(transformedData1);
}
}
float[][][] transformedDataStack = dataUnits2ModelInput(dataUnits, sampleRate, iChan);

//run the model.
float[] output = null;
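The refactor above pulls the spectrogram/waveform stack building out of runModel() into the new dataUnits2ModelInput() helper, so runModel() shrinks to a single call before the model is run. A sketch, not part of this commit, of why that split is useful: a worker whose input is not raw sound can override just the input-building step. The class name and the trivial placeholder feature are invented; the stubbed methods simply mirror the abstract methods seen in the DelphinIDWorker stub above.

import java.util.ArrayList;

import PamguardMVC.PamDataUnit;
import rawDeepLearningClassifier.DLControl;
import rawDeepLearningClassifier.dlClassification.animalSpot.StandardModelParams;
import rawDeepLearningClassifier.dlClassification.genericModel.DLModelWorker;
import rawDeepLearningClassifier.dlClassification.genericModel.GenericPrediction;

public class ContourModelWorkerSketch extends DLModelWorker<GenericPrediction> {

	@Override
	public float[][][] dataUnits2ModelInput(ArrayList<? extends PamDataUnit> dataUnits, float sampleRate, int iChan) {
		// One placeholder 1x1 "image" per data unit instead of a waveform or spectrogram stack.
		float[][][] stack = new float[dataUnits.size()][1][1];
		for (int i = 0; i < dataUnits.size(); i++) {
			stack[i][0][0] = dataUnits.get(i).getChannelBitmap();   // stand-in feature, illustration only
		}
		return stack;
	}

	@Override
	public float[] runModel(float[][][] transformedDataStack) {
		return new float[transformedDataStack.length];   // no real model in this sketch
	}

	@Override
	public boolean isModelNull() {
		return false;
	}

	@Override
	public GenericPrediction makeModelResult(float[] prob, double time) {
		return new GenericPrediction(prob);
	}

	@Override
	public void prepModel(StandardModelParams params, DLControl dlControl) {
		// nothing to prepare in this sketch
	}

	@Override
	public void closeModel() {
		// nothing to close in this sketch
	}
}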
@@ -12,7 +12,7 @@ import rawDeepLearningClassifier.dlClassification.DLClassName;
import rawDeepLearningClassifier.dlClassification.DLClassiferModel;
import rawDeepLearningClassifier.dlClassification.StandardClassifierModel;
import rawDeepLearningClassifier.dlClassification.animalSpot.StandardModelParams;
import rawDeepLearningClassifier.segmenter.SegmenterProcess.GroupedRawData;
import rawDeepLearningClassifier.segmenter.GroupedRawData;
import warnings.PamWarning;

@@ -10,6 +10,7 @@ import java.util.concurrent.atomic.AtomicBoolean;
import PamController.PamControlledUnitSettings;
import PamController.PamSettingManager;
import PamController.PamSettings;
import PamguardMVC.PamDataUnit;
import rawDeepLearningClassifier.DLControl;
import rawDeepLearningClassifier.DLStatus;
import rawDeepLearningClassifier.dlClassification.DLClassName;
@@ -18,7 +19,7 @@ import rawDeepLearningClassifier.dlClassification.DLDataUnit;
import rawDeepLearningClassifier.dlClassification.DLDetection;
import rawDeepLearningClassifier.dlClassification.PredictionResult;
import rawDeepLearningClassifier.layoutFX.DLCLassiferModelUI;
import rawDeepLearningClassifier.segmenter.SegmenterProcess.GroupedRawData;
import rawDeepLearningClassifier.segmenter.GroupedRawData;
import warnings.PamWarning;

/**
@@ -99,18 +100,18 @@ public class OrcaSpotClassifier implements DLClassiferModel, PamSettings {
}

@Override
public ArrayList<PredictionResult> runModel(ArrayList<GroupedRawData> rawDataUnits) {
public ArrayList<? extends PredictionResult> runModel(ArrayList<? extends PamDataUnit> rawDataUnits) {

for (GroupedRawData groupedRawData: rawDataUnits){
if (queue.size()>MAX_QUEUE_SIZE) {
//we are not doing well - clear the buffer
queue.clear();
}
queue.add(groupedRawData);

}
this.orcaSpotUI.notifyUpdate(-1);
// for (PamDataUnit groupedRawData: rawDataUnits){
// if (queue.size()>MAX_QUEUE_SIZE) {
// //we are not doing well - clear the buffer
// queue.clear();
// }
// queue.add(groupedRawData);
//
//
// }
// this.orcaSpotUI.notifyUpdate(-1);

return null;
}
@@ -363,4 +364,6 @@ public class OrcaSpotClassifier implements DLClassiferModel, PamSettings {
return null;
}


}
src/rawDeepLearningClassifier/segmenter/GroupedRawData.java (new file): 145 lines
@@ -0,0 +1,145 @@
package rawDeepLearningClassifier.segmenter;

import java.util.Arrays;

import PamDetection.PamDetection;
import PamUtils.PamUtils;
import PamguardMVC.PamDataUnit;

/**
*
* Temporary holder for raw data with a pre defined size. This holds one channel group of raw
* sound data.
*
* @author Jamie Macaulay
*
*/
public class GroupedRawData extends PamDataUnit implements PamDetection, Cloneable {


/*
* Raw data holder
*/
protected double[][] rawData;


/**
* Current position in the rawData;
*/
protected int[] rawDataPointer;

/**
* The data unit associated with this raw data chunk.
*/
private PamDataUnit rawDataUnit;


/**
* Create a grouped raw data unit. This contains a segment of sound data.
* @param timeMilliseconds - the time in milliseconds.
* @param channelBitmap - the channel bitmap of the raw data.
* @param startSample - the start sample of the raw data.
* @param duration - the duration of the raw data in samples.
* @param samplesize - the total sample size of the raw data unit chunk in samples.
*/
public GroupedRawData(long timeMilliseconds, int channelBitmap, long startSample, long duration, int samplesize) {
super(timeMilliseconds, channelBitmap, startSample, duration);
rawData = new double[PamUtils.getNumChannels(channelBitmap)][];
rawDataPointer = new int[PamUtils.getNumChannels(channelBitmap)];
// rawDataStartMillis = new long[PamUtils.getNumChannels(channelBitmap)];

for (int i =0; i<rawData.length; i++) {
rawData[i] = new double[samplesize];
}

}

/**
* Set the parent data unit.
* @param unit - the raw data unit.
*/
public void setParentDataUnit(PamDataUnit rawDataUnit) {
this.rawDataUnit=rawDataUnit;
}

/**
* Get the data unit that this raw sound segment is associated with.
* @Return unit - the raw data unit
*/
public PamDataUnit getParentDataUnit() {
return rawDataUnit;
}


/**
* Copy raw data from an array to another.
* @param src - the array to come from
* @param srcPos - the raw source position
* @param copyLen - the copy length.
* @groupChan - the channel (within the group)
* @return overflow - the number of raw data points left at the end which were not copied.
*/
public int copyRawData(Object src, int srcPos, int copyLen, int groupChan) {
//how much of the chunk should we copy?


int lastPos = rawDataPointer[groupChan] + copyLen;

int dataOverflow = 0;

int arrayCopyLen;
//make sure the copy length
if (lastPos>=rawData[groupChan].length) {
arrayCopyLen=copyLen-(lastPos-rawData[groupChan].length)-1;
dataOverflow = copyLen - arrayCopyLen;
}
else {
arrayCopyLen= copyLen;
}

arrayCopyLen = Math.max(arrayCopyLen, 0);

//update the current grouped raw data unit with new raw data.
System.arraycopy(src, srcPos, rawData[groupChan], rawDataPointer[groupChan], arrayCopyLen);

rawDataPointer[groupChan]=rawDataPointer[groupChan] + arrayCopyLen;

return dataOverflow;
}

/**
* Get the raw data grouped by channel.
* @return the raw acoustic data.
*/
public double[][] getRawData() {
return rawData;
}

/**
* Get the current pointer for rawData.
* @return the data pointer per channel.
*/
public int[] getRawDataPointer() {
return rawDataPointer;
}


@Override
protected GroupedRawData clone() {
try {
GroupedRawData groupedRawData = (GroupedRawData) super.clone();

//hard clone the acoustic data
groupedRawData.rawData = new double[this.rawData.length][];
for (int i=0; i<groupedRawData.rawData.length; i++) {
groupedRawData.rawData[i] = Arrays.copyOf(this.rawData[i], this.rawData[i].length);
}

return groupedRawData;

} catch (CloneNotSupportedException e) {
e.printStackTrace();
return null;
}
}
}
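GroupedRawData is now a top-level class in rawDeepLearningClassifier.segmenter rather than an inner class of SegmenterProcess, so other packages can import it directly. A short usage sketch, not part of this commit and with arbitrary numbers, of the constructor and the copyRawData() overflow behaviour:

import rawDeepLearningClassifier.segmenter.GroupedRawData;

public class GroupedRawDataSketch {
	public static void main(String[] args) {
		// One channel (bitmap 0x1), starting at sample 0, 4800 samples long.
		GroupedRawData segment = new GroupedRawData(System.currentTimeMillis(), 1, 0, 4800, 4800);

		double[] incoming = new double[6000];   // pretend audio from the acquisition chain
		int overflow = segment.copyRawData(incoming, 0, incoming.length, 0);

		// 6000 samples into a 4800 sample holder: the surplus should start the next segment.
		System.out.println("samples that did not fit: " + overflow);
		System.out.println("filled so far in channel 0: " + segment.getRawDataPointer()[0]);
	}
}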
@@ -3,7 +3,6 @@ package rawDeepLearningClassifier.segmenter;
import PamguardMVC.PamDataBlock;
import PamguardMVC.PamProcess;
import rawDeepLearningClassifier.dlClassification.ModelResultDataUnit;
import rawDeepLearningClassifier.segmenter.SegmenterProcess.GroupedRawData;

/**
* Holds raw data segments which will be classified.
@@ -5,9 +5,7 @@ import java.util.ArrayList;
import java.util.Arrays;

import PamController.PamController;
import PamDetection.PamDetection;
import PamDetection.RawDataUnit;
import PamUtils.PamArrayUtils;
import PamUtils.PamUtils;
import PamView.GroupedSourceParameters;
import PamView.PamDetectionOverlayGraphics;
@@ -17,12 +15,10 @@ import PamguardMVC.PamDataBlock;
import PamguardMVC.PamDataUnit;
import PamguardMVC.PamObservable;
import PamguardMVC.PamProcess;
import PamguardMVC.debug.Debug;
import clickDetector.ClickDetection;
import clipgenerator.ClipDataUnit;
import rawDeepLearningClassifier.DLControl;
import whistlesAndMoans.ConnectedRegionDataUnit;
import PamUtils.PamCalendar;


/**
@@ -83,7 +79,7 @@ public class SegmenterProcess extends PamProcess {
segmenterDataBlock = new SegmenterDataBlock("Segmented Raw Data", this,
dlControl.getDLParams().groupedSourceParams.getChanOrSeqBitmap());

segmenterGroupDataBlock = new SegmenterGroupDataBlock("Segmented Raw Data", this,
segmenterGroupDataBlock = new SegmenterGroupDataBlock("Segmented data units", this,
dlControl.getDLParams().groupedSourceParams.getChanOrSeqBitmap());

addOutputDataBlock(segmenterDataBlock);
@@ -232,8 +228,12 @@ public class SegmenterProcess extends PamProcess {
* @param dataUnit - the whistle data unit.
*/
private void newWhistleData(PamDataUnit dataUnit) {
ConnectedRegionDataUnit rawDataUnit = (ConnectedRegionDataUnit) dataUnit;
ConnectedRegionDataUnit whistle = (ConnectedRegionDataUnit) dataUnit;

//TODO
//this contains no raw data so we are branching off on a completely different processing path here.
//Whislte data units are saved to a buffer and then fed to the deep learning algorohtm


}

@@ -671,145 +671,6 @@ public class SegmenterProcess extends PamProcess {
}


/**
*
* Temporary holder for raw data with a pre defined size. This holds one channel group of raw
* sound data.
*
* @author Jamie Macaulay
*
*/
public static class GroupedRawData extends PamDataUnit implements PamDetection, Cloneable {


/*
* Raw data holder
*/
protected double[][] rawData;


/**
* Current position in the rawData;
*/
protected int[] rawDataPointer;

/**
* The data unit associated with this raw data chunk.
*/
private PamDataUnit rawDataUnit;


/**
* Create a grouped raw data unit. This contains a segment of sound data.
* @param timeMilliseconds - the time in milliseconds.
* @param channelBitmap - the channel bitmap of the raw data.
* @param startSample - the start sample of the raw data.
* @param duration - the duration of the raw data in samples.
* @param samplesize - the total sample size of the raw data unit chunk in samples.
*/
public GroupedRawData(long timeMilliseconds, int channelBitmap, long startSample, long duration, int samplesize) {
super(timeMilliseconds, channelBitmap, startSample, duration);
rawData = new double[PamUtils.getNumChannels(channelBitmap)][];
rawDataPointer = new int[PamUtils.getNumChannels(channelBitmap)];
// rawDataStartMillis = new long[PamUtils.getNumChannels(channelBitmap)];

for (int i =0; i<rawData.length; i++) {
rawData[i] = new double[samplesize];
}

}

/**
* Set the parent data unit.
* @param unit - the raw data unit.
*/
public void setParentDataUnit(PamDataUnit rawDataUnit) {
this.rawDataUnit=rawDataUnit;
}

/**
* Get the data unit that this raw sound segment is associated with.
* @Return unit - the raw data unit
*/
public PamDataUnit getParentDataUnit() {
return rawDataUnit;
}


/**
* Copy raw data from an array to another.
* @param src - the array to come from
* @param srcPos - the raw source position
* @param copyLen - the copy length.
* @groupChan - the channel (within the group)
* @return overflow - the number of raw data points left at the end which were not copied.
*/
public int copyRawData(Object src, int srcPos, int copyLen, int groupChan) {
//how much of the chunk should we copy?


int lastPos = rawDataPointer[groupChan] + copyLen;

int dataOverflow = 0;

int arrayCopyLen;
//make sure the copy length
if (lastPos>=rawData[groupChan].length) {
arrayCopyLen=copyLen-(lastPos-rawData[groupChan].length)-1;
dataOverflow = copyLen - arrayCopyLen;
}
else {
arrayCopyLen= copyLen;
}

arrayCopyLen = Math.max(arrayCopyLen, 0);

//update the current grouped raw data unit with new raw data.
System.arraycopy(src, srcPos, rawData[groupChan], rawDataPointer[groupChan], arrayCopyLen);

rawDataPointer[groupChan]=rawDataPointer[groupChan] + arrayCopyLen;

return dataOverflow;
}

/**
* Get the raw data grouped by channel.
* @return the raw acoustic data.
*/
public double[][] getRawData() {
return rawData;
}

/**
* Get the current pointer for rawData.
* @return the data pointer per channel.
*/
public int[] getRawDataPointer() {
return rawDataPointer;
}


@Override
protected GroupedRawData clone() {
try {
GroupedRawData groupedRawData = (GroupedRawData) super.clone();

//hard clone the acoustic data
groupedRawData.rawData = new double[this.rawData.length][];
for (int i=0; i<groupedRawData.rawData.length; i++) {
groupedRawData.rawData[i] = Arrays.copyOf(this.rawData[i], this.rawData[i].length);
}

return groupedRawData;

} catch (CloneNotSupportedException e) {
e.printStackTrace();
return null;
}
}
}


@Override
public void pamStart() {
// TODO Auto-generated method stub
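newWhistleData() above is deliberately left as a TODO: whistle data units carry no raw audio, so the plan described in the comment is to buffer them and hand them to the deep learning model as a batch. The sketch below is purely illustrative of that buffering idea; the class and its methods are not part of PAMGuard, and how the completed batch is handed to the classifier is left to the caller.

import java.util.ArrayList;

import PamguardMVC.PamDataUnit;

public class WhistleSegmentBuffer {

	private final long segmentMillis;
	private long segmentStart = -1;
	private ArrayList<PamDataUnit> buffer = new ArrayList<>();

	public WhistleSegmentBuffer(long segmentMillis) {
		this.segmentMillis = segmentMillis;
	}

	/**
	 * Add a whistle data unit; returns the completed batch when the segment window
	 * rolls over, otherwise null.
	 */
	public ArrayList<PamDataUnit> addWhistle(PamDataUnit whistle) {
		if (segmentStart < 0) {
			segmentStart = whistle.getTimeMilliseconds();
		}
		if (whistle.getTimeMilliseconds() - segmentStart >= segmentMillis) {
			ArrayList<PamDataUnit> completed = buffer;
			buffer = new ArrayList<>();
			segmentStart = whistle.getTimeMilliseconds();
			buffer.add(whistle);
			return completed;   // caller would forward this batch to the classifier
		}
		buffer.add(whistle);
		return null;
	}
}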
@@ -23,7 +23,7 @@ import rawDeepLearningClassifier.defaultModels.RightWhaleModel1;
import rawDeepLearningClassifier.dlClassification.genericModel.GenericModelParams;
import rawDeepLearningClassifier.dlClassification.genericModel.GenericModelWorker;
import rawDeepLearningClassifier.dlClassification.genericModel.GenericPrediction;
import rawDeepLearningClassifier.segmenter.SegmenterProcess.GroupedRawData;
import rawDeepLearningClassifier.segmenter.GroupedRawData;

/**
* Test the generic classifier.
@@ -16,7 +16,7 @@ import org.jamdev.jpamutils.wavFiles.AudioData;
import rawDeepLearningClassifier.dlClassification.genericModel.GenericPrediction;
import rawDeepLearningClassifier.dlClassification.ketos.KetosDLParams;
import rawDeepLearningClassifier.dlClassification.ketos.KetosWorker2;
import rawDeepLearningClassifier.segmenter.SegmenterProcess.GroupedRawData;
import rawDeepLearningClassifier.segmenter.GroupedRawData;

import org.junit.jupiter.api.Test;

@@ -17,10 +17,8 @@ import org.junit.jupiter.api.Test;

import rawDeepLearningClassifier.dlClassification.animalSpot.StandardModelParams;
import rawDeepLearningClassifier.dlClassification.genericModel.GenericPrediction;
import rawDeepLearningClassifier.dlClassification.ketos.KetosDLParams;
import rawDeepLearningClassifier.dlClassification.ketos.KetosWorker2;
import rawDeepLearningClassifier.dlClassification.koogu.KooguModelWorker;
import rawDeepLearningClassifier.segmenter.SegmenterProcess.GroupedRawData;
import rawDeepLearningClassifier.segmenter.GroupedRawData;
import us.hebi.matlab.mat.format.Mat5;
import us.hebi.matlab.mat.format.Mat5File;
import us.hebi.matlab.mat.types.Matrix;