Mirror of https://github.com/PAMGuard/PAMGuard.git
Updates to get DelphinID working including building a testing framework.
This commit is contained in:
parent 17c428b870
commit 9ba3e7e3f7
@@ -114,7 +114,9 @@ public class TDControlFX extends TDControl implements UserDisplayNodeFX {
ArrayList<PamDataBlock> dataBlocks=new ArrayList<PamDataBlock>();
PamDataBlock dataBlock=this.tdDisplayController.getUserDisplayProcess().getParentDataBlock();
if (TDDataProviderRegisterFX.getInstance().findDataProvider(dataBlock)!=null) dataBlocks.add(dataBlock);
if (dataBlock!=null) System.out.println("TDControldFX: parent datablock "+dataBlock.getDataName());
if (dataBlock!=null) {
System.out.println("TDControldFX: parent datablock "+dataBlock.getDataName());
}
else{
System.out.println("TDControldFX: parent datablock null");
return dataBlocks;
@@ -201,7 +201,7 @@ public class PamGuiTabFX extends PamTabFX {
* @return the internal pane which has been added
*/
public PamGuiInternalPane addInternalPane(UserDisplayNodeFX userDisplayNodeFX){
System.out.println("UserDisplayNodeFX: " + userDisplayNodeFX);
// System.out.println("UserDisplayNodeFX: " + userDisplayNodeFX);
if (userDisplayNodeFX==null || userDisplayNodeFX.getNode()==null) return null;

for (PamGuiInternalPane internalPane: this.internalPanes) {
@@ -2,7 +2,7 @@ package rawDeepLearningClassifier.dlClassification;

import PamguardMVC.DataUnitBaseData;
import PamguardMVC.PamDataUnit;
import rawDeepLearningClassifier.dlClassification.genericModel.GenericPrediction;
import rawDeepLearningClassifier.dlClassification.genericModel.StandardPrediction;

/**
* A data unit created from classification results of DL model. this data unit holds one model results, i.e.
@@ -37,7 +37,7 @@ public class DLDataUnit extends PamDataUnit {
public DLDataUnit(DataUnitBaseData baseData, float[] data) {
super(baseData);
//System.out.println("DLDataUnit: " + this.getChannelBitmap());
this.modelResult = new GenericPrediction(data);
this.modelResult = new StandardPrediction(data);
}

public DLDataUnit(DataUnitBaseData baseData, PredictionResult modelResult) {
@@ -7,7 +7,7 @@ import java.util.concurrent.atomic.AtomicBoolean;

import PamguardMVC.PamDataUnit;
import rawDeepLearningClassifier.dlClassification.genericModel.DLModelWorker;
import rawDeepLearningClassifier.dlClassification.genericModel.GenericPrediction;
import rawDeepLearningClassifier.dlClassification.genericModel.StandardPrediction;
import rawDeepLearningClassifier.segmenter.GroupedRawData;

/**
@@ -52,7 +52,7 @@ public abstract class DLTaskThread extends Thread {
System.out.println("DL TASK THREAD: " + "The queue size is " + queue.size());
ArrayList<? extends PamDataUnit> groupedRawData = queue.remove(0);

ArrayList<GenericPrediction> modelResult = dlModelWorker.runModel(groupedRawData,
ArrayList<StandardPrediction> modelResult = dlModelWorker.runModel(groupedRawData,
groupedRawData.get(0).getParentDataBlock().getSampleRate(), 0); //TODO channel?

for (int i =0; i<modelResult.size(); i++) {
@@ -79,7 +79,7 @@ public abstract class DLTaskThread extends Thread {
* @param soundSpotResult - the new result.
* @param groupedRawData - the grouped data unit.
*/
public abstract void newDLResult(GenericPrediction soundSpotResult, PamDataUnit groupedRawData);
public abstract void newDLResult(StandardPrediction soundSpotResult, PamDataUnit groupedRawData);

/**
* Get the grouped data queue
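The hunks above show only fragments of the DLTaskThread loop. As a reference, here is a minimal standalone sketch of the same queue-draining pattern (take the oldest batch with remove(0), run the model, hand each prediction on). The Model and Prediction types below are placeholders, not PAMGuard classes, and synchronization of the shared queue is omitted for brevity.

import java.util.ArrayList;
import java.util.concurrent.atomic.AtomicBoolean;

// Sketch of the DLTaskThread pattern: drain a queue of batches and run the model on each.
public class TaskThreadSketch extends Thread {

	static class Prediction { float[] prob; Prediction(float[] p) { prob = p; } }
	interface Model { ArrayList<Prediction> runModel(ArrayList<float[]> batch, float sampleRate, int chan); }

	private final ArrayList<ArrayList<float[]>> queue = new ArrayList<>();
	private final AtomicBoolean run = new AtomicBoolean(true);
	private final Model model;
	private final float sampleRate;

	TaskThreadSketch(Model model, float sampleRate) {
		this.model = model;
		this.sampleRate = sampleRate;
	}

	@Override
	public void run() {
		while (run.get()) {
			if (!queue.isEmpty()) {
				// oldest batch first, as DLTaskThread does with queue.remove(0)
				ArrayList<float[]> batch = queue.remove(0);
				ArrayList<Prediction> results = model.runModel(batch, sampleRate, 0);
				// each result would then be passed on, e.g. to a newDLResult(...) callback
			}
			else {
				try { Thread.sleep(10); } catch (InterruptedException e) { return; }
			}
		}
	}
}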
@@ -19,7 +19,7 @@ import rawDeepLearningClassifier.DLStatus;
import rawDeepLearningClassifier.dlClassification.animalSpot.StandardModelParams;
import rawDeepLearningClassifier.dlClassification.genericModel.DLModelWorker;
import rawDeepLearningClassifier.dlClassification.genericModel.GenericDLClassifier;
import rawDeepLearningClassifier.dlClassification.genericModel.GenericPrediction;
import rawDeepLearningClassifier.dlClassification.genericModel.StandardPrediction;
import rawDeepLearningClassifier.layoutFX.DLSettingsPane;
import rawDeepLearningClassifier.segmenter.GroupedRawData;
import warnings.PamWarning;
@@ -73,7 +73,7 @@ public abstract class StandardClassifierModel implements DLClassiferModel, PamSe
if ((PamCalendar.isSoundFile() && !forceQueue) || dlControl.isViewer()) {
//run the model
@SuppressWarnings("unchecked")
ArrayList<GenericPrediction> modelResult = (ArrayList<GenericPrediction>) getDLWorker().runModel(groupedRawData,
ArrayList<StandardPrediction> modelResult = (ArrayList<StandardPrediction>) getDLWorker().runModel(groupedRawData,
groupedRawData.get(0).getParentDataBlock().getSampleRate(), 0);

if (modelResult==null) {
@@ -205,14 +205,58 @@ public abstract class StandardClassifierModel implements DLClassiferModel, PamSe
}

@Override
public void newDLResult(GenericPrediction soundSpotResult, PamDataUnit groupedRawData) {
soundSpotResult.setClassNameID(GenericDLClassifier.getClassNameIDs(getDLParams()));
soundSpotResult.setBinaryClassification(GenericDLClassifier.isBinaryResult(soundSpotResult, getDLParams()));
public void newDLResult(StandardPrediction soundSpotResult, PamDataUnit groupedRawData) {
soundSpotResult.setClassNameID(getClassNameIDs(getDLParams()));
soundSpotResult.setBinaryClassification(isDecision(soundSpotResult, getDLParams()));
newResult(soundSpotResult, groupedRawData);
}

}

/**
* Make a decision on whether a result passed a decision
* @param modelResult - the model result.
* @param modelParmas - the model parameters.
* @return true if a threshold has been met.
*/
public boolean isDecision(StandardPrediction modelResult, StandardModelParams modelParmas) {
return isBinaryResult(modelResult, modelParmas);
}

/**
* Get the class name IDs
* @return an array of class name IDs
*/
public static short[] getClassNameIDs(StandardModelParams standardModelParams) {
if (standardModelParams.classNames==null || standardModelParams.classNames.length<=0) return null;
short[] nameIDs = new short[standardModelParams.classNames.length];
for (int i = 0 ; i<standardModelParams.classNames.length; i++) {
nameIDs[i] = standardModelParams.classNames[i].ID;
}
return nameIDs;
}

/**
* Check whether a model passes a binary test...
* @param modelResult - the model results
* @return the model results.
*/
public static boolean isBinaryResult(StandardPrediction modelResult, StandardModelParams genericModelParams) {
for (int i=0; i<modelResult.getPrediction().length; i++) {
//System.out.println("Binary Classification: " + genericModelParams.binaryClassification.length);

if (modelResult.getPrediction()[i]>genericModelParams.threshold && genericModelParams.binaryClassification[i]) {
// System.out.println("SoundSpotClassifier: prediciton: " + i + " passed threshold with val: " + modelResult.getPrediction()[i]);
return true;
}
}
return false;
}

@Override
public void closeModel() {
@@ -225,7 +269,7 @@ public abstract class StandardClassifierModel implements DLClassiferModel, PamSe
* @param modelResult - the model result;
* @param groupedRawData - the grouped raw data.
*/
protected void newResult(GenericPrediction modelResult, PamDataUnit groupedRawData) {
protected void newResult(StandardPrediction modelResult, PamDataUnit groupedRawData) {
if (groupedRawData instanceof GroupedRawData) {
this.dlControl.getDLClassifyProcess().newModelResult(modelResult, (GroupedRawData) groupedRawData);
}
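For reference, the binary decision that isDecision/isBinaryResult implement above reduces to a per-class threshold check. A minimal standalone sketch of that logic, using invented prediction values and parameters rather than the real PAMGuard classes:

// Standalone sketch of the isBinaryResult(...) threshold test shown above.
// The prediction array, threshold and binaryClassification flags are made-up example values.
public class BinaryDecisionSketch {
	public static void main(String[] args) {
		float[] prediction = {0.05f, 0.91f, 0.12f};            // model output per class
		double threshold = 0.8;                                // cf. StandardModelParams.threshold
		boolean[] binaryClassification = {false, true, true};  // which classes count towards a detection

		boolean passed = false;
		for (int i = 0; i < prediction.length; i++) {
			if (prediction[i] > threshold && binaryClassification[i]) {
				passed = true; // class 1 (0.91) exceeds the 0.8 threshold
				break;
			}
		}
		System.out.println("Binary classification passed: " + passed); // true
	}
}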
@@ -15,7 +15,7 @@ import rawDeepLearningClassifier.DLControl;
import rawDeepLearningClassifier.dlClassification.DLClassiferModel;
import rawDeepLearningClassifier.dlClassification.StandardClassifierModel;
import rawDeepLearningClassifier.dlClassification.genericModel.DLModelWorker;
import rawDeepLearningClassifier.dlClassification.genericModel.GenericPrediction;
import rawDeepLearningClassifier.dlClassification.genericModel.StandardPrediction;
import rawDeepLearningClassifier.layoutFX.DLCLassiferModelUI;

/**
@@ -53,7 +53,7 @@ public class SoundSpotClassifier extends StandardClassifierModel {
/**
* The deep learning model worker.
*/
private DLModelWorker<GenericPrediction> soundSpotWorker;
private DLModelWorker<StandardPrediction> soundSpotWorker;

public SoundSpotClassifier(DLControl dlControl) {
@@ -171,7 +171,7 @@ public class SoundSpotClassifier extends StandardClassifierModel {

@Override
public DLModelWorker<GenericPrediction> getDLWorker() {
public DLModelWorker<StandardPrediction> getDLWorker() {
if (soundSpotWorker==null) {
soundSpotWorker = new SoundSpotWorker();
}
@@ -1,13 +1,13 @@
package rawDeepLearningClassifier.dlClassification.animalSpot;

import rawDeepLearningClassifier.dlClassification.genericModel.GenericPrediction;
import rawDeepLearningClassifier.dlClassification.genericModel.StandardPrediction;

/**
* Result from the SoundSpotClassifier.
* @author Jamie Macaulay
*
*/
public class SoundSpotResult extends GenericPrediction {
public class SoundSpotResult extends StandardPrediction {

public SoundSpotResult(float[] prob, boolean isBinary) {
super(prob, isBinary);
@@ -7,7 +7,7 @@ import org.jamdev.jdl4pam.animalSpot.AnimalSpotParams;

import rawDeepLearningClassifier.DLControl;
import rawDeepLearningClassifier.dlClassification.genericModel.DLModelWorker;
import rawDeepLearningClassifier.dlClassification.genericModel.GenericPrediction;
import rawDeepLearningClassifier.dlClassification.genericModel.StandardPrediction;

/**
@@ -19,7 +19,7 @@ import rawDeepLearningClassifier.dlClassification.genericModel.GenericPrediction
* @author Jamie Macaulay
*
*/
public class SoundSpotWorker extends DLModelWorker<GenericPrediction> {
public class SoundSpotWorker extends DLModelWorker<StandardPrediction> {

/**
@@ -14,7 +14,7 @@ import rawDeepLearningClassifier.dlClassification.DLClassiferModel;
import rawDeepLearningClassifier.dlClassification.StandardClassifierModel;
import rawDeepLearningClassifier.dlClassification.animalSpot.StandardModelParams;
import rawDeepLearningClassifier.dlClassification.genericModel.DLModelWorker;
import rawDeepLearningClassifier.dlClassification.genericModel.GenericPrediction;
import rawDeepLearningClassifier.dlClassification.genericModel.StandardPrediction;
import rawDeepLearningClassifier.dlClassification.ketos.KetosDLParams;
import rawDeepLearningClassifier.dlClassification.ketos.KetosUI;
import rawDeepLearningClassifier.layoutFX.DLCLassiferModelUI;
@@ -81,7 +81,7 @@ public abstract class ArchiveModelClassifier extends StandardClassifierModel {

@Override
public DLModelWorker<GenericPrediction> getDLWorker() {
public DLModelWorker<StandardPrediction> getDLWorker() {
return getModelWorker();
}
@@ -26,7 +26,7 @@ import rawDeepLearningClassifier.DLControl;
import rawDeepLearningClassifier.dlClassification.animalSpot.StandardModelParams;
import rawDeepLearningClassifier.dlClassification.genericModel.DLModelWorker;
import rawDeepLearningClassifier.dlClassification.genericModel.GenericModelWorker;
import rawDeepLearningClassifier.dlClassification.genericModel.GenericPrediction;
import rawDeepLearningClassifier.dlClassification.genericModel.StandardPrediction;

/**
*
@@ -228,8 +228,8 @@ public class ArchiveModelWorker extends GenericModelWorker {

@Override
public GenericPrediction makeModelResult(float[] prob, double time) {
GenericPrediction prediction = new GenericPrediction(prob);
public StandardPrediction makeModelResult(float[] prob, double time) {
StandardPrediction prediction = new StandardPrediction(prob);
prediction.setAnalysisTime(time);
return prediction;
}
@@ -251,6 +251,10 @@ public class ArchiveModelWorker extends GenericModelWorker {
public ArchiveModel getModel() {
return dlModel;
}

protected void setModel(ArchiveModel dlModel) {
this.dlModel = dlModel;
}

@Override
public boolean isModelNull() {
@@ -13,7 +13,7 @@ import rawDeepLearningClassifier.dlClassification.DLClassiferModel;
import rawDeepLearningClassifier.dlClassification.StandardClassifierModel;
import rawDeepLearningClassifier.dlClassification.animalSpot.StandardModelParams;
import rawDeepLearningClassifier.dlClassification.genericModel.DLModelWorker;
import rawDeepLearningClassifier.dlClassification.genericModel.GenericPrediction;
import rawDeepLearningClassifier.dlClassification.genericModel.StandardPrediction;
import rawDeepLearningClassifier.layoutFX.DLCLassiferModelUI;

/**
@@ -86,6 +86,15 @@ public class DelphinIDClassifier extends StandardClassifierModel {
return delphinIDParams;

}

@Override
public boolean isDecision(StandardPrediction modelResult, StandardModelParams modelParmas) {
//TODO
//DelphinID uses a different decision-making process from most of the standard classifiers, which just pass a binary threshold.
return false;
}

@Override
@@ -111,7 +120,7 @@ public class DelphinIDClassifier extends StandardClassifierModel {

@Override
public DLModelWorker<GenericPrediction> getDLWorker() {
public DLModelWorker<StandardPrediction> getDLWorker() {
if (delphinIDWorker==null) {
delphinIDWorker = new DelphinIDWorker();
}
@@ -1,8 +1,8 @@
package rawDeepLearningClassifier.dlClassification.delphinID;

import rawDeepLearningClassifier.dlClassification.genericModel.GenericPrediction;
import rawDeepLearningClassifier.dlClassification.genericModel.StandardPrediction;

public class DelphinIDPrediction extends GenericPrediction{
public class DelphinIDPrediction extends StandardPrediction{

public DelphinIDPrediction(float[] prob) {
super(prob);
@@ -0,0 +1,243 @@
package rawDeepLearningClassifier.dlClassification.delphinID;

import java.io.IOException;
import java.util.ArrayList;

import PamUtils.PamArrayUtils;
import PamguardMVC.DataUnitBaseData;
import rawDeepLearningClassifier.dlClassification.animalSpot.StandardModelParams;
import rawDeepLearningClassifier.dlClassification.genericModel.StandardPrediction;
import rawDeepLearningClassifier.segmenter.SegmenterDetectionGroup;
import us.hebi.matlab.mat.format.Mat5;
import us.hebi.matlab.mat.format.Mat5File;
import us.hebi.matlab.mat.types.Matrix;
import us.hebi.matlab.mat.types.Struct;
import whistleClassifier.WhistleContour;
import whistlesAndMoans.AbstractWhistleDataUnit;

/**
* A delphinID test suite.
*
* @author Jamie Macaulay
*
*/
public class DelphinIDTest {

public static DelphinIDWorker prepDelphinIDModel(String modelPath) {

//create the delphinID worker.
DelphinIDWorker delphinIDWorker = new DelphinIDWorker();

StandardModelParams params = new StandardModelParams();
params.modelPath = modelPath;

//prepare the model
delphinIDWorker.prepModel(params, null);

return delphinIDWorker;
}

/**
* Load whistle contours from a MAT file.
*
* @param filePath - the file path.
*
* @return a list of whistle contour objects from the mat file.
*/
public static ArrayList<AbstractWhistleDataUnit> getWhistleContoursMAT(String filePath){

ArrayList<AbstractWhistleDataUnit> contours = new ArrayList<AbstractWhistleDataUnit>();

// SegmenterDetectionGroup segmenterDetectionGroup = new SegmenterDetectionGroup(0, 0, 0, 0);

// Read scalar from nested struct
try {
Mat5File matFile = Mat5.readFromFile(filePath);
Struct whistlesStruct = matFile.getStruct("whistles");

double fftLen = matFile.getMatrix("fftlen").getDouble(0);
double fftHop = matFile.getMatrix("ffthop").getDouble(0);
double sampleRate = matFile.getMatrix("samplerate").getDouble(0);

for (int i=0; i< whistlesStruct.getNumElements(); i++) {
DataUnitBaseData basicData = new DataUnitBaseData();

long timeMillis = ((Matrix)whistlesStruct.get("millis", i)).getLong(0);
basicData.setTimeMilliseconds(timeMillis);

long sampleDuration = ((Matrix)whistlesStruct.get("sampleDuration", i)).getLong(0);
basicData.setSampleDuration(sampleDuration);

basicData.setMillisecondDuration(1000.*(sampleDuration/sampleRate));

int channelMap = ((Matrix)whistlesStruct.get("channelMap", i)).getInt(0);
basicData.setChannelBitmap(channelMap);

long uid = ((Matrix)whistlesStruct.get("UID", i)).getLong(0);
basicData.setUID(uid);

long startSample = ((Matrix)whistlesStruct.get("startSample", i)).getLong(0);
basicData.setStartSample(startSample);

int nSlices = ((Matrix)whistlesStruct.get("nSlices", i)).getInt(0);

double[] freq = new double[nSlices];
double[] times = new double[nSlices];

Matrix contourStruct = whistlesStruct.getMatrix("contour", i);
for (int j=0; j<nSlices; j++) {
freq[j] = contourStruct.getDouble(j)*sampleRate/fftLen;
times[j] = j * fftHop /sampleRate;
}

contours.add(new WhistleContourMAT(basicData, freq, times));
}

} catch (IOException e) {
// TODO Auto-generated catch block
e.printStackTrace();
}

return contours;
}

/**
* Segment the detections into groups. Note that segments overlap, so each whistle may belong to multiple segments.
* @param whistles - a list of whistles - not necessarily sorted by time.
* @param dataStartMillis - the start time of the data in millis i.e. where the first segment starts.
* @param segLen - the segment size in milliseconds.
* @param segHop - the segment hop in milliseconds.
* @return groups of data units within each segment.
*/
public static ArrayList<SegmenterDetectionGroup> segmentWhsitleData(ArrayList<AbstractWhistleDataUnit> whistles, long dataStartMillis,
double segLen, double segHop){

ArrayList<SegmenterDetectionGroup> group = new ArrayList<SegmenterDetectionGroup>();

//find the maximum whistle time
long maxTime = Long.MIN_VALUE;
long endTime = 0;
for (AbstractWhistleDataUnit whislte: whistles) {
endTime = (long) (whislte.getTimeMilliseconds()+whislte.getDurationInMilliseconds());
if (endTime>maxTime) maxTime=endTime;
}

long segStart = dataStartMillis;
long segEnd = (long) (segStart+segLen);

long whistleStart;
long whistleEnd;
SegmenterDetectionGroup whistleGroup;
while (segStart<endTime){

whistleGroup = new SegmenterDetectionGroup(segStart, 1, segEnd, segHop);

for (AbstractWhistleDataUnit whislte: whistles) {
whistleStart = whislte.getTimeMilliseconds();
whistleEnd = (long) (whislte.getTimeMilliseconds() + whislte.getDurationInMilliseconds());

if ((whistleStart>=segStart && whistleStart<segEnd) || ((whistleEnd>=segStart && whistleEnd<segEnd))){
//some part of the whistle is in the segment.
whistleGroup.addSubDetection(whislte);
}

}

group.add(whistleGroup);

segStart = (long) (segStart+segHop);
segEnd = (long) (segStart+segLen);
}

return group;

}

public static class WhistleContourMAT extends AbstractWhistleDataUnit {

private double[] freq;
private double[] times;

public WhistleContourMAT(DataUnitBaseData basicData, double[] freq, double[] times) {
super(basicData);
this.freq=freq;
this.times=times;
}

@Override
public int getSliceCount() {
return freq.length;
}

@Override
public double[] getTimesInSeconds() {
return times;
}

@Override
public double[] getFreqsHz() {
return freq;
}

}

/**
* Main class for running the test.
* @param args - the arguments
*/
public static void main(String args[]) {

double segLen = 4000.;
double segHop = 1000.0;
float sampleRate =96000;
//unix time from sound file
long dataStartMillis = 1340212413000L;

//path to the .mat containing whistle contours.
String whistleContourPath = "D:\\Dropbox\\PAMGuard_dev\\Deep_Learning\\delphinID\\testencounter415\\whistle_contours.mat";

//the path to the model
String modelPath = "D:/Dropbox/PAMGuard_dev/Deep_Learning/delphinID/testencounter415/whistle_4s_encounter415.zip";

//get the whistle contours from a .mat file.
ArrayList<AbstractWhistleDataUnit> whistleContours = getWhistleContoursMAT(whistleContourPath);

//segment the whistle detections
ArrayList<SegmenterDetectionGroup> segments = segmentWhsitleData(whistleContours, dataStartMillis,
segLen, segHop);

for (int i=0; i<segments.size(); i++) {
System.out.println("Segment " + i + " contains " + segments.get(i).getSubDetectionsCount() + " whistles");
}

DelphinIDWorker model = prepDelphinIDModel(modelPath);

for (int i=0; i<segments.size(); i++) {

//remember that the input is a stack of detections to be run by the model at once - here we want to do each one individually.
ArrayList<SegmenterDetectionGroup> aSegment = new ArrayList<SegmenterDetectionGroup>();
aSegment.add(segments.get(i));

//the prediction.
ArrayList<StandardPrediction> predicition = model.runModel(aSegment, sampleRate, 1);

float[] output = predicition.get(0).getPrediction();

}

// for (int i=0; i<whistleContours.size(); i++) {
// System.out.println("Whislte: " + i);
// PamArrayUtils.printArray(whistleContours.get(i).getFreqsHz());
// }

}

}
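The overlapping segmentation used by segmentWhsitleData above (4 s windows advanced by a 1 s hop in the test main()) means a single whistle can fall into several consecutive segments. A small standalone sketch of that bookkeeping, using invented whistle start/end times rather than real detections:

// Standalone sketch of the overlapping segmentation in segmentWhsitleData above.
// The whistle start/end times (ms from data start) are made-up example values.
public class SegmentSketch {
	public static void main(String[] args) {
		double segLen = 4000., segHop = 1000.;            // as in the test main()
		long[][] whistles = {{500, 1200}, {3500, 4200}};  // {start, end} in ms

		for (long segStart = 0; segStart < 5000; segStart += (long) segHop) {
			long segEnd = (long) (segStart + segLen);
			int count = 0;
			for (long[] w : whistles) {
				// same overlap test as the inner loop of segmentWhsitleData
				if ((w[0] >= segStart && w[0] < segEnd) || (w[1] >= segStart && w[1] < segEnd)) count++;
			}
			System.out.println("Segment " + segStart + "-" + segEnd + " ms contains " + count + " whistles");
		}
	}
}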
@@ -1,74 +1,130 @@
package rawDeepLearningClassifier.dlClassification.delphinID;

import java.io.File;
import java.util.ArrayList;

import org.jamdev.jdl4pam.transforms.DLTransform;
import org.jamdev.jdl4pam.transforms.DLTransfromParams;
import org.jamdev.jdl4pam.transforms.FreqTransform;
import org.jamdev.jdl4pam.transforms.DLTransform.DLTransformType;
import org.jamdev.jdl4pam.transforms.jsonfile.DLTransformsParser;
import org.jamdev.jdl4pam.utils.DLUtils;
import org.json.JSONArray;
import org.json.JSONObject;

import PamguardMVC.PamDataUnit;
import ai.djl.Model;
import rawDeepLearningClassifier.DLControl;
import rawDeepLearningClassifier.dlClassification.animalSpot.StandardModelParams;
import rawDeepLearningClassifier.dlClassification.archiveModel.ArchiveModelWorker;
import rawDeepLearningClassifier.dlClassification.delphinID.Whistles2Image.Whistle2ImageParams;
import rawDeepLearningClassifier.segmenter.GroupedRawData;
import rawDeepLearningClassifier.segmenter.SegmenterDetectionGroup;

/**
*
*
* @author Jamie Macaulay
*
*/
public class DelphinIDWorker extends ArchiveModelWorker {

/**
* Parameters for the whistle to image transform.
*/
private Whistle2ImageParams whistleImageParams;

@Override
public void prepModel(StandardModelParams dlParams, DLControl dlControl) {
//most of the model prep is done in the parent class.
super.prepModel(dlParams, dlControl);

//now have to read the whistles2image transform to get the correct parameters for that.
String jsonString = DLTransformsParser.readJSONString(new File(this.getModel().getAudioReprFile()));
whistleImageParams = readWhistleImageTransform(new JSONObject(jsonString)) ;
if (whistleImageParams==null) {
System.err.println("Error: could not find whistle2image transform in DelphinID JSON file. Model will not work.");
this.setModel(null); // set model to null to make sure nothing works and errors are thrown
}
}

/**
* Read the whistle transform settings - this is not included in the JPAM library because it directly
* references PAMGuard specific detections.
*/
private Whistle2ImageParams readWhistleImageTransform(JSONObject mainObject) {
//first parse the transforms.
JSONArray jsonArray = mainObject.getJSONArray("transforms");

JSONObject jsonObjectParams;
for (int i=0; i<jsonArray.length(); i++) {

String transformName = (String) jsonArray.getJSONObject(i).get("name");

if (transformName.trim().equals("whistles2image")) {

jsonObjectParams = (JSONObject) jsonArray.getJSONObject(i).get("params");

double[] freqLimits = new double[2];
double[] size = new double[2];
freqLimits[0] = jsonObjectParams.getFloat("minfreq");
freqLimits[1] = jsonObjectParams.getFloat("maxfreq");
size[0] = jsonObjectParams.getInt("widthpix");
size[1] = jsonObjectParams.getInt("heightpix");

Whistle2ImageParams whistle2ImageParmas = new Whistle2ImageParams();
whistle2ImageParmas.freqLimits = freqLimits;
whistle2ImageParmas.size = size;

return whistle2ImageParmas;
}

}
//something has gone wrong if we get here.
return null;
}

@Override
public float[][][] dataUnits2ModelInput(ArrayList<? extends PamDataUnit> dataUnits, float sampleRate, int iChan){

//Get a list of the model transforms.
ArrayList<DLTransform> modelTransforms = getModelTransforms();

@SuppressWarnings("unchecked")
ArrayList<GroupedRawData> whistleGroups = (ArrayList<GroupedRawData>) dataUnits;

ArrayList<SegmenterDetectionGroup> whistleGroups = (ArrayList<SegmenterDetectionGroup>) dataUnits;

//the number of chunks.
int numChunks = whistleGroups.size();

//data input into the model - a stack of spectrogram images.
float[][][] transformedDataStack = new float[numChunks][][];
//
//		//generate the spectrogram stack.
//		AudioData soundData;
//		double[][] transformedData2; //spec data
//		double[] transformedData1; //waveform data
//		for (int j=0; j<numChunks; j++) {
//
//			soundData = new AudioData(rawDataUnits.get(j).getRawData()[iChan], sampleRate);
//
//			//	for (int i=0; i<modelTransforms.size(); i++) {
//			//	System.out.println("Transfrom type: " + modelTransforms.get(i).getDLTransformType());
//			//	}
//			//set the sound in the first transform.
//			((WaveTransform) modelTransforms.get(0)).setWaveData(soundData);
//
////			System.out.println("Model transforms:no. " + modelTransforms.size()+ " input sounds len: " + soundData.getLengthInSeconds()
////			+ " Decimate Params: " + ((WaveTransform) modelTransforms.get(0)).getParams()[0] + "max amplitude sound: " + PamArrayUtils.max(soundData.samples));
//
//			DLTransform transform = modelTransforms.get(0);
//			for (int i =0; i<modelTransforms.size(); i++) {
//				transform = modelTransforms.get(i).transformData(transform);
////				//TEMP
////				if (transform instanceof FreqTransform) {
////					transformedData = ((FreqTransform) transform).getSpecTransfrom().getTransformedData();
////					System.out.println("DLModelWorker: transform : " + modelTransforms.get(i).getDLTransformType() + " "+ i + transformedData.length + " " + transformedData[0].length + " minmax: " + PamArrayUtils.minmax(transformedData)[0] + " " + PamArrayUtils.minmax(transformedData)[1]);
////				}
//			}
//
//			if (transform instanceof FreqTransform) {
//				//add a spectrogram to the stacl
//				transformedData2 = ((FreqTransform) transform).getSpecTransfrom().getTransformedData();
//				transformedDataStack[j] = DLUtils.toFloatArray(transformedData2);
//			}
//			else {
//				//add wavefrom to the stack = we make the 2nd dimesnion 1.
//				transformedData1 = ((WaveTransform) transform).getWaveData().getScaledSampleAmplitudes();
//				transformedDataStack[j] = new float[1][transformedData1.length];
//				transformedDataStack[j][0] = DLUtils.toFloatArray(transformedData1);
//			}
//		}

double[][] transformedData2; //spectrogram data
for (int j=0; j<numChunks; j++) {

//create the first transform and set the whistle data. Note that the absolute time limits are
//contained within the SegmenterDetectionGroup unit.
Whistles2Image whistles2Image = new Whistles2Image(whistleGroups.get(j), whistleImageParams);

//add the transform to the current list.
this.getModelTransforms().add(0, whistles2Image);

//process all the transforms.
DLTransform transform = modelTransforms.get(0);
for (int i =0; i<modelTransforms.size(); i++) {
transform = modelTransforms.get(i).transformData(transform);
}

transformedData2 = ((FreqTransform) transform).getSpecTransfrom().getTransformedData();
transformedDataStack[j] = DLUtils.toFloatArray(transformedData2);
}

return transformedDataStack;
}

}
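readWhistleImageTransform above looks for a "whistles2image" entry in the model's transforms array and reads minfreq, maxfreq, widthpix and heightpix from its params block. A hedged sketch of what such a block might look like and how those keys are read, using the same org.json calls as the code above; the numeric values are invented and not taken from a real delphinID model file:

import org.json.JSONArray;
import org.json.JSONObject;

// Sketch of the JSON structure readWhistleImageTransform(...) expects.
// Key names come from the parsing code above; the numeric values are made up.
public class WhistleTransformJsonSketch {
	public static void main(String[] args) {
		String json = "{ \"transforms\": [ { \"name\": \"whistles2image\","
				+ " \"params\": { \"minfreq\": 2000.0, \"maxfreq\": 20000.0,"
				+ " \"widthpix\": 226, \"heightpix\": 226 } } ] }";

		JSONArray transforms = new JSONObject(json).getJSONArray("transforms");
		for (int i = 0; i < transforms.length(); i++) {
			if ("whistles2image".equals(transforms.getJSONObject(i).getString("name").trim())) {
				JSONObject params = transforms.getJSONObject(i).getJSONObject("params");
				double[] freqLimits = { params.getFloat("minfreq"), params.getFloat("maxfreq") };
				double[] size = { params.getInt("widthpix"), params.getInt("heightpix") };
				System.out.println("freq " + freqLimits[0] + "-" + freqLimits[1] + " Hz, image "
						+ (int) size[0] + "x" + (int) size[1] + " px");
			}
		}
	}
}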
@@ -1,7 +1,5 @@
package rawDeepLearningClassifier.dlClassification.delphinID;

import java.net.MalformedURLException;
import java.net.URISyntaxException;
import java.util.ArrayList;
import java.util.List;

@@ -10,7 +8,6 @@ import javax.swing.JPanel;
import PamController.SettingsPane;
import javafx.scene.Node;
import javafx.stage.FileChooser.ExtensionFilter;
import pamViewFX.fxGlyphs.PamSVGIcon;
import rawDeepLearningClassifier.layoutFX.DLCLassiferModelUI;

/**
@@ -83,24 +80,25 @@ public class DelphinUI implements DLCLassiferModelUI {

@Override
public Node getIcon() {
if (icon==null) {
PamSVGIcon iconMaker= new PamSVGIcon();
PamSVGIcon svgsprite;
try {
svgsprite = iconMaker.create(getClass().getResource("/Resources/delphinid_logo01.svg").toURI().toURL());
icon = svgsprite.getSpriteNode();
} catch (MalformedURLException e) {
// TODO Auto-generated catch block
e.printStackTrace();
} catch (URISyntaxException e) {
// TODO Auto-generated catch block
e.printStackTrace();
} catch (Exception e) {
// TODO Auto-generated catch block
e.printStackTrace();
}
}
return icon;
//		if (icon==null) {
//			PamSVGIcon iconMaker= new PamSVGIcon();
//			PamSVGIcon svgsprite;
//			try {
//				svgsprite = iconMaker.create(getClass().getResource("/Resources/delphinid_logo01.svg").toURI().toURL());
//				icon = svgsprite.getSpriteNode();
//			} catch (MalformedURLException e) {
//				// TODO Auto-generated catch block
//				e.printStackTrace();
//			} catch (URISyntaxException e) {
//				// TODO Auto-generated catch block
//				e.printStackTrace();
//			} catch (Exception e) {
//				// TODO Auto-generated catch block
//				e.printStackTrace();
//			}
//		}
//		return icon;
return null;
}

}
@@ -1,12 +1,14 @@
package rawDeepLearningClassifier.dlClassification.delphinID;

import java.awt.Color;
import java.awt.image.BufferedImage;
import java.util.ArrayList;

import org.jamdev.jdl4pam.transforms.FreqTransform;
import org.jamdev.jpamutils.spectrogram.SpecTransform;

import javafx.scene.canvas.Canvas;
import javafx.scene.image.WritableImage;
import javafx.scene.paint.Color;
import rawDeepLearningClassifier.segmenter.SegmenterDetectionGroup;
import whistlesAndMoans.AbstractWhistleDataUnit;

/**
* Transform whistles to an image.
@@ -22,21 +24,21 @@ public class Whistles2Image extends FreqTransform {

/**
* Create an image transform from a whistleGroup.
* @param whistleGroup
* @param params
* @param whistleGroup - the whistle group
* @param params - the parameters for the whistle image - min. freq., max. freq., width in pixels and height in pixels.
*/
public Whistles2Image(SegmenterDetectionGroup whistleGroup, Number[] params) {
super(null, params);
double[] freqLimits = new double[] {params[0].doubleValue(), params[1].doubleValue()};
double[] size = new double[] {params[2].doubleValue(), params[3].doubleValue()};

SpecTransform specTransform = whistleGroupToImage( whistleGroup, freqLimits, size);

public Whistles2Image(SegmenterDetectionGroup whistleGroup, Whistle2ImageParams params) {
super(null, null);
// double[] freqLimits = new double[] {params[0].doubleValue(), params[1].doubleValue()};
// double[] size = new double[] {params[2].doubleValue(), params[3].doubleValue()};

SpecTransform specTransform = whistleGroupToImage( whistleGroup, params.freqLimits, params.size);

this.setSpecTransfrom(specTransform);
this.setFreqlims(freqLimits);

this.setFreqlims(params.freqLimits);

}

/**
* Convert a group of whistles
@@ -56,22 +58,21 @@ public class Whistles2Image extends FreqTransform {
* 4.8 inches as default, axes removed before saving using plt.axes('off'))
**/

double[][] points = whistContours2Points(whistleGroup);
ArrayList<double[][]> points = whistContours2Points(whistleGroup);

Canvas canvas = makeScatterImage(points, size, new double[]{0, whistleGroup.getDurationInMilliseconds()}, freqLimits, 5.);

WritableImage image = canvas.getGraphicsContext2D().getCanvas().snapshot(null, null);
//does not work because it has to be on the AWT thread.
BufferedImage canvas = makeScatterImage(points, size, new double[]{0, whistleGroup.getDurationInMilliseconds()}, freqLimits, 5.);

double[][] imaged = new double[(int) size[0]][(int) size[1]];

Color color;
int color;
for (int i=0; i<imaged.length; i++) {
for (int j=0; j<imaged[0].length; j++) {
color = image.getPixelReader().getColor(i, j);
imaged[i][j] = color.getRed();
color = canvas.getRGB(i, j);
imaged[i][j] = color;
}
}

specTransform.setSpecData(imaged);
specTransform.setSampleRate((float) (freqLimits[1]*2));

@@ -83,38 +84,99 @@ public class Whistles2Image extends FreqTransform {
* @param whistleGroup - list of whistle contours within a detection group.
* @return an array with time (milliseconds from start of group) and frequency (Hz)
*/
private double[][] whistContours2Points(SegmenterDetectionGroup whistleGroup) {
// TODO Auto-generated method stub
return null;
private ArrayList<double[][]> whistContours2Points(SegmenterDetectionGroup whistleGroup) {

ArrayList<double[][]> contours = new ArrayList<double[][]>();

AbstractWhistleDataUnit whistleContour;

long segStart = whistleGroup.getTimeMilliseconds();

for (int i=0; i<whistleGroup.getSubDetectionsCount(); i++) {

whistleContour = (AbstractWhistleDataUnit) whistleGroup.getSubDetection(i);

double[][] contourD = new double[whistleContour.getSliceCount()][2];
for (int j=0; j<whistleContour.getSliceCount(); j++) {
contourD[j][0] = (segStart - whistleContour.getTimeMilliseconds())/1000. + whistleContour.getTimesInSeconds()[j];
contourD[j][1] = whistleContour.getFreqsHz()[j];
}
contours.add(contourD);
}

return contours;
}

//	/**
//	 * Create a scatter image from points
//	 * @param points - list of time frequency points - the points are time (milliseconds from 0) and frequency
//	 * @param size - the width and height of the image in pixels
//	 * @param xlims - the minimum and maximum time in milliseconds from 0;
//	 * @param ylims - the minimum and maximum frequency in Hz
//	 * @param markerSize - the marker size in pixels
//	 * @return an image with y axis as frequency and x axis as time.
//	 */
//	private Canvas makeScatterImage(ArrayList<double[][]> points, double[] size, double[] xlims, double[] ylims, double markerSize) {
//
//		Canvas canvas = new Canvas(size[0], size[1]);
//
//		double x, y;
//		for (int j=0; j<points.size(); j++) {
//
//			for (int i=0; i<points.get(j).length; i++) {
//				canvas.getGraphicsContext2D().setFill(Color.BLACK);
//
//				//Calculate x and y in pixels.
//				x = ((points.get(j)[i][0]-xlims[0])/(xlims[1]-xlims[0]))*size[0];
//				y = ((points.get(j)[i][0]-xlims[0])/(xlims[1]-xlims[0]))*size[0];
//
//				canvas.getGraphicsContext2D().fillOval(x+markerSize/2, y-markerSize/2, markerSize/2, markerSize/2);
//			}
//		}
//
//		return canvas;
//	}

/**
* Create a scatter image from points
* @param points - the points are time (milliseconds from 0) and frequency
* @param points - list of time frequency points - the points are time (milliseconds from 0) and frequency
* @param size - the width and height of the image in pixels
* @param xlims - the minimum and maximum time in milliseconds from 0;
* @param ylims - the minimum and maximum frequency in Hz
* @param markerSize - the marker size in pixels
* @return an image with y axis as frequency and x axis as time.
*/
private Canvas makeScatterImage(double[][] points, double[] size, double[] xlims, double[] ylims, double markerSize) {
private BufferedImage makeScatterImage(ArrayList<double[][]> points, double[] size, double[] xlims, double[] ylims, double markerSize) {

Canvas canvas = new Canvas(size[0], size[1]);
BufferedImage canvas = new BufferedImage((int) size[0], (int) size[1], BufferedImage.TYPE_INT_RGB);

double x, y;
for (int i=0; i<points.length; i++) {
canvas.getGraphicsContext2D().setFill(Color.BLACK);
for (int j=0; j<points.size(); j++) {

//Calculate x and y in pixels.
x = ((points[i][0]-xlims[0])/(xlims[1]-xlims[0]))*size[0];
y = ((points[i][0]-xlims[0])/(xlims[1]-xlims[0]))*size[0];
for (int i=0; i<points.get(j).length; i++) {
canvas.getGraphics().setColor(Color.BLACK);

canvas.getGraphicsContext2D().fillOval(x+markerSize/2, y-markerSize/2, markerSize/2, markerSize/2);
//Calculate x and y in pixels.
x = ((points.get(j)[i][0]-xlims[0])/(xlims[1]-xlims[0]))*size[0];
y = ((points.get(j)[i][1]-ylims[0])/(ylims[1]-ylims[0]))*size[1];

canvas.getGraphics().fillOval((int) (x+markerSize/2),(int) (y-markerSize/2), (int) markerSize,(int) markerSize);
}
}

return canvas;
}

public static class Whistle2ImageParams {

/**
* The frequency limits in Hz.
*/
public double[] freqLimits;

public double[] size;

}

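The scatter rasterisation in makeScatterImage above maps each (time, frequency) point linearly onto the image grid. A standalone sketch of that mapping (the limits and image size below are invented example values, and whether the frequency axis additionally needs flipping to match the Python-generated training images is left open, as in the code above):

// Standalone sketch of the linear time/frequency -> pixel mapping used in makeScatterImage above.
// The limits and image size are made-up example values.
public class ScatterMappingSketch {
	public static void main(String[] args) {
		double[] xlims = {0, 4000};      // segment time range, ms
		double[] ylims = {2000, 20000};  // frequency range, Hz
		double[] size = {226, 226};      // image width and height in pixels

		double tMillis = 1000, freqHz = 11000; // one example whistle point

		int x = (int) ((tMillis - xlims[0]) / (xlims[1] - xlims[0]) * size[0]);
		int y = (int) ((freqHz - ylims[0]) / (ylims[1] - ylims[0]) * size[1]);

		System.out.println("point -> pixel (" + x + ", " + y + ")"); // (56, 113)
	}
}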
@@ -184,43 +184,12 @@ public class GenericDLClassifier extends StandardClassifierModel {
* @param modelResult - the model result;
* @param groupedRawData - the grouped raw data.
*/
protected void newResult(GenericPrediction modelResult, GroupedRawData groupedRawData) {
protected void newResult(StandardPrediction modelResult, GroupedRawData groupedRawData) {
this.dlControl.getDLClassifyProcess().newModelResult(modelResult, groupedRawData);
}

/**
* Get the class name IDs
* @return an array of class name IDs
*/
public static short[] getClassNameIDs(StandardModelParams standardModelParams) {
if (standardModelParams.classNames==null || standardModelParams.classNames.length<=0) return null;
short[] nameIDs = new short[standardModelParams.classNames.length];
for (int i = 0 ; i<standardModelParams.classNames.length; i++) {
nameIDs[i] = standardModelParams.classNames[i].ID;
}
return nameIDs;
}

/**
* Check whether a model passes a binary test...
* @param modelResult - the model results
* @return the model results.
*/
public static boolean isBinaryResult(GenericPrediction modelResult, StandardModelParams genericModelParams) {
for (int i=0; i<modelResult.getPrediction().length; i++) {
//System.out.println("Binary Classification: " + genericModelParams.binaryClassification.length);

if (modelResult.getPrediction()[i]>genericModelParams.threshold && genericModelParams.binaryClassification[i]) {
// System.out.println("SoundSpotClassifier: prediciton: " + i + " passed threshold with val: " + modelResult.getPrediction()[i]);
return true;
}
}
return false;
}

//	@Override
//	public ArrayList<PamWarning> checkSettingsOK() {
//		return checkSettingsOK(genericModelParams, dlControl);
@@ -234,7 +203,7 @@ public class GenericDLClassifier extends StandardClassifierModel {
}

@Override
public DLModelWorker<GenericPrediction> getDLWorker() {
public DLModelWorker<StandardPrediction> getDLWorker() {
return this.genericModelWorker;
}

@@ -18,7 +18,7 @@ import rawDeepLearningClassifier.dlClassification.animalSpot.StandardModelParams
* @author Jamie Macaulay
*
*/
public class GenericModelWorker extends DLModelWorker<GenericPrediction> {
public class GenericModelWorker extends DLModelWorker<StandardPrediction> {

/**
* The generic model
@@ -52,8 +52,8 @@ public class GenericModelWorker extends DLModelWorker<GenericPrediction> {
}

@Override
public GenericPrediction makeModelResult(float[] prob, double time) {
GenericPrediction model = new GenericPrediction(prob);
public StandardPrediction makeModelResult(float[] prob, double time) {
StandardPrediction model = new StandardPrediction(prob);
model.setAnalysisTime(time);
return model;
}
@@ -9,7 +9,7 @@ import rawDeepLearningClassifier.dlClassification.PredictionResult;
* @author Jamie Macaulay
*
*/
public class GenericPrediction implements PredictionResult {
public class StandardPrediction implements PredictionResult {

/**
@@ -45,14 +45,14 @@ public class GenericPrediction implements PredictionResult {
* @param classNameID - the ID's of the class names.
* @param isBinary - true if the model result passed a binary test (usually one species above a threshold)
*/
public GenericPrediction(float[] prob, short[] classNameID, boolean isBinary) {
public StandardPrediction(float[] prob, short[] classNameID, boolean isBinary) {
this.prob=prob;
this.classNameID = classNameID;
this.binaryPass= isBinary;
}

public GenericPrediction(float[] prob, boolean isBinary) {
public StandardPrediction(float[] prob, boolean isBinary) {
this(prob, null, isBinary);
}

@@ -60,7 +60,7 @@ public class GenericPrediction implements PredictionResult {
* Create a result for the Sound Spot classifier.
* @param prob - the probability of each class.
*/
public GenericPrediction(float[] prob) {
public StandardPrediction(float[] prob) {
this(prob, null, false);
}
@@ -14,7 +14,7 @@ import rawDeepLearningClassifier.dlClassification.DLClassiferModel;
import rawDeepLearningClassifier.dlClassification.StandardClassifierModel;
import rawDeepLearningClassifier.dlClassification.animalSpot.StandardModelParams;
import rawDeepLearningClassifier.dlClassification.genericModel.DLModelWorker;
import rawDeepLearningClassifier.dlClassification.genericModel.GenericPrediction;
import rawDeepLearningClassifier.dlClassification.genericModel.StandardPrediction;
import rawDeepLearningClassifier.layoutFX.DLCLassiferModelUI;

/**
@@ -77,7 +77,7 @@ public class KetosClassifier extends StandardClassifierModel {

@Override
public DLModelWorker<GenericPrediction> getDLWorker() {
public DLModelWorker<StandardPrediction> getDLWorker() {
return getKetosWorker();
}
@@ -1,6 +1,6 @@
package rawDeepLearningClassifier.dlClassification.ketos;

import rawDeepLearningClassifier.dlClassification.genericModel.GenericPrediction;
import rawDeepLearningClassifier.dlClassification.genericModel.StandardPrediction;

/**
@@ -8,7 +8,7 @@ import rawDeepLearningClassifier.dlClassification.genericModel.GenericPrediction
* @author Jamie Macaulay
*
*/
public class KetosResult extends GenericPrediction {
public class KetosResult extends StandardPrediction {

public KetosResult(float[] prob) {
@@ -15,7 +15,7 @@ import PamView.dialog.warn.WarnOnce;
import rawDeepLearningClassifier.DLControl;
import rawDeepLearningClassifier.dlClassification.animalSpot.StandardModelParams;
import rawDeepLearningClassifier.dlClassification.genericModel.DLModelWorker;
import rawDeepLearningClassifier.dlClassification.genericModel.GenericPrediction;
import rawDeepLearningClassifier.dlClassification.genericModel.StandardPrediction;

/**
*
@@ -25,7 +25,7 @@ import rawDeepLearningClassifier.dlClassification.genericModel.GenericPrediction
* @author Jamie Macaulay
*
*/
public class KetosWorker extends DLModelWorker<GenericPrediction> {
public class KetosWorker extends DLModelWorker<StandardPrediction> {

/**
@@ -4,7 +4,7 @@ import rawDeepLearningClassifier.DLControl;
import rawDeepLearningClassifier.dlClassification.archiveModel.ArchiveModelClassifier;
import rawDeepLearningClassifier.dlClassification.archiveModel.ArchiveModelWorker;
import rawDeepLearningClassifier.dlClassification.genericModel.DLModelWorker;
import rawDeepLearningClassifier.dlClassification.genericModel.GenericPrediction;
import rawDeepLearningClassifier.dlClassification.genericModel.StandardPrediction;

/**
* Classifier which uses deep learning models from Koogu's framework.
@@ -8,7 +8,7 @@ import PamUtils.PamArrayUtils;
import rawDeepLearningClassifier.dlClassification.PredictionResult;
import rawDeepLearningClassifier.dlClassification.animalSpot.SoundSpotResult;
import rawDeepLearningClassifier.dlClassification.dummyClassifier.DummyModelResult;
import rawDeepLearningClassifier.dlClassification.genericModel.GenericPrediction;
import rawDeepLearningClassifier.dlClassification.genericModel.StandardPrediction;
import rawDeepLearningClassifier.dlClassification.ketos.KetosResult;

/**
@@ -146,7 +146,7 @@ public class ModelResultBinaryFactory {
break;
default:
//ideally should never be used.
result = new GenericPrediction(data, isBinary);
result = new StandardPrediction(data, isBinary);
break;
}
//System.out.println("New model result: "+ type);
@@ -173,7 +173,7 @@ public class ModelResultBinaryFactory {
return KETOS;
}
//must be last because this is often sub classed
if (modelResult instanceof GenericPrediction) {
if (modelResult instanceof StandardPrediction) {
return GENERIC;
}
if (modelResult instanceof DummyModelResult) {
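The comment above ("must be last because this is often sub classed") is about instanceof ordering: a KetosResult or SoundSpotResult is also a StandardPrediction, so the StandardPrediction check has to come after the subclass checks or it would match everything. A tiny standalone illustration with placeholder classes standing in for StandardPrediction and one of its subclasses:

// Standalone illustration of why the base-class instanceof check must come last.
public class InstanceofOrderSketch {
	static class Base {}
	static class Sub extends Base {}

	static String type(Base result) {
		if (result instanceof Sub) return "SUB";   // subclass checks first
		if (result instanceof Base) return "BASE"; // base class check last
		return "UNKNOWN";
	}

	public static void main(String[] args) {
		System.out.println(type(new Sub()));  // SUB
		System.out.println(type(new Base())); // BASE
	}
}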
@@ -19,9 +19,16 @@ public class SegmenterDetectionGroup extends GroupDetection<PamDataUnit> {
* @param startSample - the startSample of the SEGMENT.
* @param duration - the duration of the SEGMENT.
*/
public SegmenterDetectionGroup(long timeMilliseconds, int channelBitmap, long startSample, long duration) {
super(timeMilliseconds, channelBitmap, startSample, duration);
// TODO Auto-generated constructor stub
public SegmenterDetectionGroup(long timeMilliseconds, int channelBitmap, long startSample, double duration) {
super(timeMilliseconds, channelBitmap, startSample, (long) duration);
this.setDurationInMilliseconds(duration);
}

@Override
public boolean isAllowSubdetectionSharing() {
//segments share sub detections
return true;
}

}
@@ -12,7 +12,7 @@ public class SegmenterGroupDataBlock extends PamDataBlock<SegmenterDetectionGrou

public SegmenterGroupDataBlock(String dataName, PamProcess parentProcess, int channelMap) {
super(SegmenterDetectionGroup.class, dataName, parentProcess, channelMap);
// TODO Auto-generated constructor stub
}

}
@@ -22,7 +22,7 @@ import rawDeepLearningClassifier.defaultModels.HumpbackWhaleAtlantic;
import rawDeepLearningClassifier.defaultModels.RightWhaleModel1;
import rawDeepLearningClassifier.dlClassification.genericModel.GenericModelParams;
import rawDeepLearningClassifier.dlClassification.genericModel.GenericModelWorker;
import rawDeepLearningClassifier.dlClassification.genericModel.GenericPrediction;
import rawDeepLearningClassifier.dlClassification.genericModel.StandardPrediction;
import rawDeepLearningClassifier.segmenter.GroupedRawData;

/**
@@ -77,7 +77,7 @@ public class GenericDLClassifierTest {

groupedData.add(groupedRawData);

ArrayList<GenericPrediction> gwenericPrediciton = genericModelWorker.runModel(groupedData, soundData.sampleRate, 0);
ArrayList<StandardPrediction> gwenericPrediciton = genericModelWorker.runModel(groupedData, soundData.sampleRate, 0);

float[] output = gwenericPrediciton.get(0).getPrediction();

@@ -165,7 +165,7 @@ public class GenericDLClassifierTest {

groupedData.add(groupedRawData);

ArrayList<GenericPrediction> genericPrediction = genericModelWorker.runModel(groupedData, soundData.sampleRate, 0);
ArrayList<StandardPrediction> genericPrediction = genericModelWorker.runModel(groupedData, soundData.sampleRate, 0);

float[] output = genericPrediction.get(0).getPrediction();

@@ -13,7 +13,7 @@ import javax.sound.sampled.UnsupportedAudioFileException;
import org.jamdev.jdl4pam.utils.DLUtils;
import org.jamdev.jpamutils.wavFiles.AudioData;

import rawDeepLearningClassifier.dlClassification.genericModel.GenericPrediction;
import rawDeepLearningClassifier.dlClassification.genericModel.StandardPrediction;
import rawDeepLearningClassifier.dlClassification.ketos.KetosDLParams;
import rawDeepLearningClassifier.dlClassification.ketos.KetosWorker2;
import rawDeepLearningClassifier.segmenter.GroupedRawData;
@@ -140,7 +140,7 @@ public class KetosDLClassifierTest {
ArrayList<GroupedRawData> groupedData = new ArrayList<GroupedRawData>();
groupedData.add(groupedRawData);

ArrayList<GenericPrediction> genericPrediciton = ketosWorker2.runModel(groupedData, soundData.sampleRate, 0);
ArrayList<StandardPrediction> genericPrediciton = ketosWorker2.runModel(groupedData, soundData.sampleRate, 0);
float[] output = genericPrediciton.get(0).getPrediction();

boolean testPassed= output[1]> ketosPredicitons[i][2]-0.1 && output[1]< ketosPredicitons[i][2]+0.1;
@@ -16,7 +16,7 @@ import org.jamdev.jpamutils.wavFiles.AudioData;
import org.junit.jupiter.api.Test;

import rawDeepLearningClassifier.dlClassification.animalSpot.StandardModelParams;
import rawDeepLearningClassifier.dlClassification.genericModel.GenericPrediction;
import rawDeepLearningClassifier.dlClassification.genericModel.StandardPrediction;
import rawDeepLearningClassifier.dlClassification.koogu.KooguModelWorker;
import rawDeepLearningClassifier.segmenter.GroupedRawData;
import us.hebi.matlab.mat.format.Mat5;
@@ -102,7 +102,7 @@ public class KooguDLClassifierTest {
groupedData.add(groupedRawData);

ArrayList<GenericPrediction> genericPrediciton = kooguWorker.runModel(groupedData, soundData.sampleRate, 0);
ArrayList<StandardPrediction> genericPrediciton = kooguWorker.runModel(groupedData, soundData.sampleRate, 0);
float[] output = genericPrediciton.get(0).getPrediction();

boolean testPassed= output[1]> kooguPredicitions[i][2]-0.1 && output[1]< kooguPredicitions[i][2]+0.1;
@@ -1,17 +1,17 @@
package tethys.localization;

import nilus.CylindricalCoordinateType;
import nilus.LocalizationType;
import nilus.Localize.Effort.CoordinateReferenceSystem;
//import nilus.CylindricalCoordinateType;
//import nilus.LocalizationType;
//import nilus.Localize.Effort.CoordinateReferenceSystem;

public class LocalizationHandler {

public LocalizationType getLoc() {
LocalizationType lt = new LocalizationType();
CylindricalCoordinateType cct = new CylindricalCoordinateType();
// cct.set
CoordinateReferenceSystem cr;
return null;
}
//	public LocalizationType getLoc() {
//		LocalizationType lt = new LocalizationType();
//		CylindricalCoordinateType cct = new CylindricalCoordinateType();
////		cct.set
//		CoordinateReferenceSystem cr;
//		return null;
//	}
}