Merge branch 'main' of https://github.com/macster110/PAMGuard
Commit 61033b38ca
@ -228,6 +228,7 @@ public class DLControl extends PamControlledUnit implements PamSettings {
|
||||
|
||||
// classify the raw data segments.
|
||||
addPamProcess(dlClassifyProcess = new DLClassifyProcess(this, segmenterProcess.getSegmenterDataBlock()));
|
||||
dlClassifyProcess.addMultiPlexDataBlock(segmenterProcess.getSegmenteGrouprDataBlock());
|
||||
|
||||
//manages the names assigned to different output classes.
|
||||
dlClassNameManager = new DLClassNameManager(this);
|
||||
|
@ -101,6 +101,8 @@ public class DLPredictionPane extends PamBorderPane implements TDSettingsPane {
|
||||
if (dlPredictionPlotInfoFX.getDlControl().getDLModel()!=null) {
|
||||
//populate the prediction pane.
|
||||
DLClassName[] classNames = dlPredictionPlotInfoFX.getDlControl().getDLModel().getClassNames();
|
||||
|
||||
// System.out.println("MAKE MY CLASS NAMES: " + dlPredictionPlotInfoFX.getDlControl().getDLModel().getClassNames());
|
||||
|
||||
layoutColourPanes(classNames);
|
||||
}
|
||||
|
@ -94,7 +94,7 @@ public class DLPredictionPlotInfoFX extends GenericLinePlotInfo {
|
||||
if (getDlControl().getDLModel()!=null) {
|
||||
DLClassName[] classNames = getDlControl().getDLModel().getClassNames();
|
||||
|
||||
// System.out.println("Class names are: !!! " + (classNames == null ? "null" : classNames.length));
|
||||
System.out.println("Class names are: !!! " + (classNames == null ? "null" : classNames.length));
|
||||
|
||||
if (classNames!=null) {
|
||||
|
||||
@ -105,8 +105,8 @@ public class DLPredictionPlotInfoFX extends GenericLinePlotInfo {
|
||||
dlPredParams.lineInfos[i] = new LineInfo(true, Color.rgb(0, 0, 255%(i*30 + 50)));
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
getGraphSettingsPane().setParams();
|
||||
}
|
||||
}
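A quick worked example of the colour expression used above, Color.rgb(0, 0, 255%(i*30 + 50)): the modulo gives each class index a different blue component, so each prediction line gets its own shade. The class below only illustrates the arithmetic and is not part of the commit.

public class LineColourExample {
    public static void main(String[] args) {
        // reproduces the blue component assigned to each prediction line
        for (int i = 0; i < 6; i++) {
            System.out.println(i + " -> blue " + (255 % (i * 30 + 50)));
        }
        // prints 5, 15, 35, 115, 85, 55 for the first six classes
    }
}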
|
||||
|
||||
|
@ -20,6 +20,7 @@ import rawDeepLearningClassifier.logging.DLAnnotation;
|
||||
import rawDeepLearningClassifier.logging.DLAnnotationType;
|
||||
import rawDeepLearningClassifier.segmenter.GroupedRawData;
|
||||
import rawDeepLearningClassifier.segmenter.SegmenterDataBlock;
|
||||
import rawDeepLearningClassifier.segmenter.SegmenterDetectionGroup;
|
||||
|
||||
/**
|
||||
* The deep learning classification process. This takes a segment of raw data from the segmenter.
|
||||
@ -35,7 +36,7 @@ import rawDeepLearningClassifier.segmenter.SegmenterDataBlock;
|
||||
*/
|
||||
public class DLClassifyProcess extends PamInstantProcess {
|
||||
|
||||
|
||||
|
||||
/**
|
||||
* Holds all model results but no other information
|
||||
*/
|
||||
@ -64,15 +65,13 @@ public class DLClassifyProcess extends PamInstantProcess {
|
||||
/**
|
||||
* The DL buffer
|
||||
*/
|
||||
private ArrayList<GroupedRawData> classificationBuffer;
|
||||
|
||||
private ArrayList<PamDataUnit> classificationBuffer;
|
||||
|
||||
/**
|
||||
* The DL annotation type.
|
||||
*/
|
||||
private DLAnnotationType dlAnnotationType;
|
||||
|
||||
|
||||
/**
|
||||
* The last parent data for grouped data. This is used to ensure that DLDetections
|
||||
* correspond to the raw chunk of data from a parent detection e.g. a click detection.
|
||||
@ -82,8 +81,8 @@ public class DLClassifyProcess extends PamInstantProcess {
|
||||
|
||||
public DLClassifyProcess(DLControl dlControl, SegmenterDataBlock parentDataBlock) {
|
||||
super(dlControl);
|
||||
|
||||
|
||||
|
||||
|
||||
this.setParentDataBlock(parentDataBlock);
|
||||
|
||||
// this.setParentDataBlock(parentDataBlock);
|
||||
@ -117,7 +116,7 @@ public class DLClassifyProcess extends PamInstantProcess {
|
||||
overlayGraphics.setDetectionData(true);
|
||||
dlDetectionDataBlock.setOverlayDraw(overlayGraphics);
|
||||
|
||||
classificationBuffer = new ArrayList<GroupedRawData>();
|
||||
classificationBuffer = new ArrayList<PamDataUnit>();
|
||||
|
||||
//the process name.
|
||||
setProcessName("Deep Learning Classifier");
|
||||
@ -140,7 +139,7 @@ public class DLClassifyProcess extends PamInstantProcess {
|
||||
System.err.println("Raw Deep Learning Classifier: The grouped source parameters were null."
|
||||
+ " A new instance has been created: Possible de-serialization error.");
|
||||
}
|
||||
|
||||
|
||||
|
||||
//important for downstream processes such as the bearing localiser.
|
||||
dlModelResultDataBlock.setChannelMap(dlControl.getDLParams().groupedSourceParams.getChannelBitmap());
|
||||
@ -166,7 +165,7 @@ public class DLClassifyProcess extends PamInstantProcess {
|
||||
public void prepareProcess() {
|
||||
setupClassifierProcess();
|
||||
}
|
||||
|
||||
|
||||
|
||||
/**
|
||||
* called for every process once the system model has been created.
|
||||
@ -194,36 +193,72 @@ public class DLClassifyProcess extends PamInstantProcess {
|
||||
*/
|
||||
@Override
|
||||
public void newData(PamObservable obs, PamDataUnit pamRawData) {
|
||||
// System.out.println("NEW SEGMENTER DATA");
|
||||
|
||||
//the raw data units should appear in sequential channel order
|
||||
GroupedRawData rawDataUnit = (GroupedRawData) pamRawData;
|
||||
|
||||
if (checkGroupData(rawDataUnit)) {
|
||||
//check whether the classification buffer is full. If it is then run
|
||||
if (isClassificationBufferFull(classificationBuffer, rawDataUnit)) {
|
||||
|
||||
//first call run model to clear out the classification buffer if needs be
|
||||
runModel();
|
||||
if (pamRawData instanceof SegmenterDetectionGroup) {
|
||||
if (classificationBuffer.size()>=1) {
|
||||
// System.out.println("RUN THE MODEL FOR WHISTLES: ");
|
||||
runDetectionGroupModel();
|
||||
classificationBuffer.clear();
|
||||
}
|
||||
|
||||
classificationBuffer.add(rawDataUnit);
|
||||
else {
|
||||
classificationBuffer.add(pamRawData);
|
||||
}
|
||||
}
|
||||
// System.out.println("New raw data in: chan: " + PamUtils.getSingleChannel(pamRawData.getChannelBitmap()) +
|
||||
// " Size: " + pamRawData.getSampleDuration() + " first sample: " + rawDataUnit.getRawData()[0][0]
|
||||
// + "Parent UID: " + rawDataUnit.getParentDataUnit().getUID());
|
||||
|
||||
if (pamRawData instanceof GroupedRawData) {
|
||||
//the raw data units should appear in sequential channel order
|
||||
GroupedRawData rawDataUnit = (GroupedRawData) pamRawData;
|
||||
|
||||
if (checkGroupData(rawDataUnit)) {
|
||||
//check whether the classification buffer is full. If it is then run
|
||||
if (isRawClassificationBufferFull(classificationBuffer, rawDataUnit)) {
|
||||
|
||||
//first call run model to clear out the classification buffer if needs be
|
||||
runRawModel();
|
||||
classificationBuffer.clear();
|
||||
}
|
||||
|
||||
classificationBuffer.add(rawDataUnit);
|
||||
|
||||
}
|
||||
}
|
||||
// System.out.println("New raw data in: chan: " + PamUtils.getSingleChannel(pamRawData.getChannelBitmap()) +
|
||||
// " Size: " + pamRawData.getSampleDuration() + " first sample: " + rawDataUnit.getRawData()[0][0]
|
||||
// + "Parent UID: " + rawDataUnit.getParentDataUnit().getUID());
|
||||
}
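The reworked newData(...) above branches on the type of the incoming unit: SegmenterDetectionGroup segments are classified one at a time, while GroupedRawData clips are batched until the raw buffer is full. Below is a minimal, self-contained sketch of that pattern; the class names and the fixed buffer size are stand-ins for illustration, not the PAMGuard API.

import java.util.ArrayList;
import java.util.List;

class ClassifyBufferSketch {
    static class Unit { }                        // stand-in for PamDataUnit
    static class DetectionGroup extends Unit { } // stand-in for SegmenterDetectionGroup
    static class RawClip extends Unit { }        // stand-in for GroupedRawData

    private final List<Unit> buffer = new ArrayList<>();

    void newData(Unit unit) {
        if (unit instanceof DetectionGroup) {
            // detection groups are run as soon as anything is waiting in the buffer
            if (!buffer.isEmpty()) {
                runDetectionGroupModel();
                buffer.clear();
            }
            buffer.add(unit);
        }
        else if (unit instanceof RawClip) {
            // raw clips are batched; an assumed fixed size stands in for isRawClassificationBufferFull
            if (buffer.size() >= 16) {
                runRawModel();
                buffer.clear();
            }
            buffer.add(unit);
        }
    }

    void runDetectionGroupModel() { /* pass the buffered segments to the model */ }
    void runRawModel() { /* pass the buffered raw clips to the model */ }
}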
|
||||
|
||||
|
||||
|
||||
/**
|
||||
* Run a model for which the input is a detection group.
|
||||
*/
|
||||
private synchronized void runDetectionGroupModel() {
|
||||
if (classificationBuffer.size()<=0) return;
|
||||
ArrayList<PamDataUnit> classificationBufferTemp = (ArrayList<PamDataUnit>) classificationBuffer.clone();
|
||||
|
||||
ArrayList<? extends PredictionResult> modelResults = this.dlControl.getDLModel().runModel(classificationBufferTemp);
|
||||
|
||||
for (int i=0; i<classificationBufferTemp.size(); i++) {
|
||||
|
||||
if (modelResults!=null && modelResults.get(i)!=null) {
|
||||
DLDataUnit dlDataUnit = predictionToDataUnit(classificationBuffer.get(i), modelResults.get(i));
|
||||
this.dlModelResultDataBlock.addPamData(dlDataUnit); //here
|
||||
}
|
||||
}
|
||||
|
||||
}
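Both runDetectionGroupModel() and runRawModel() copy the buffer before running inference. A short sketch of why: the snapshot keeps the model input stable even if newData(...) keeps adding units while the model runs. Names here are illustrative only.

import java.util.ArrayList;
import java.util.List;

class DefensiveCopySketch {
    private final List<double[]> buffer = new ArrayList<>();

    synchronized void runModel() {
        if (buffer.isEmpty()) return;
        // snapshot the current contents so later additions cannot change the batch mid-inference
        List<double[]> batch = new ArrayList<>(buffer);
        buffer.clear();
        predict(batch);
    }

    void predict(List<double[]> batch) { /* run inference on the stable snapshot */ }
}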
|
||||
|
||||
|
||||
/**
|
||||
* Run the model if the classification buffer is full.
|
||||
*/
|
||||
private void runModel() {
|
||||
private void runRawModel() {
|
||||
|
||||
if (classificationBuffer.size()<=0) return;
|
||||
|
||||
//run the deep learning algorithm
|
||||
ArrayList<GroupedRawData> classificationBufferTemp = (ArrayList<GroupedRawData>) classificationBuffer.clone();
|
||||
ArrayList<? extends PredictionResult> modelResults = this.dlControl.getDLModel().runModel(classificationBuffer);
|
||||
ArrayList<? extends PredictionResult> modelResults = this.dlControl.getDLModel().runModel(classificationBufferTemp);
|
||||
|
||||
if (modelResults==null) {
|
||||
return; //there has been a problem
|
||||
@ -235,18 +270,18 @@ public class DLClassifyProcess extends PamInstantProcess {
|
||||
// System.out.println("Compare Times: " + PamCalendar.formatDBDateTime(modelResults.get(i).getTimeMillis(), true) +
|
||||
// " " + PamCalendar.formatDBDateTime(classificationBufferTemp.get(i).getTimeMilliseconds(), true) + " " +
|
||||
// modelResults.get(i).getPrediction()[1]);
|
||||
newModelResult(modelResults.get(i), classificationBufferTemp.get(i));
|
||||
newRawModelResult(modelResults.get(i), classificationBufferTemp.get(i));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Check whether the buffer is full and the results should be passed to the classification model.
|
||||
* Check whether the buffer is full and the results should be passed to the classification model if we are using GroupedRawData units.
|
||||
* @param classificationBuffer2 - the classification buffer.
|
||||
* @param rawDataUnit - the next raw data unit to add to the buffer.
|
||||
* @return true if the buffer is full.
|
||||
*/
|
||||
private boolean isClassificationBufferFull(ArrayList<GroupedRawData> classificationBuffer2, GroupedRawData rawDataUnit) {
|
||||
private boolean isRawClassificationBufferFull(ArrayList<PamDataUnit> classificationBuffer2, GroupedRawData rawDataUnit) {
|
||||
|
||||
if (classificationBuffer2.size()==0) return false;
|
||||
|
||||
@ -254,16 +289,16 @@ public class DLClassifyProcess extends PamInstantProcess {
|
||||
//1) It's over a max time
|
||||
//2) Contains different parent data units (if not from raw data).
|
||||
|
||||
GroupedRawData lastUnit = classificationBuffer2.get(classificationBuffer2.size()-1);
|
||||
GroupedRawData lastUnit = (GroupedRawData) classificationBuffer2.get(classificationBuffer2.size()-1);
|
||||
|
||||
if (!(lastUnit.getParentDataUnit() instanceof RawDataUnit) && lastUnit.getParentDataUnit()!=rawDataUnit.getParentDataUnit()) {
|
||||
//there is a new parent data unit.
|
||||
return true;
|
||||
}
|
||||
|
||||
//get the start time. Use min value instead of first data just in case units ar enot in order.
|
||||
//get the start time. Use min value instead of first data just in case units are not in order.
|
||||
long min = Long.MAX_VALUE;
|
||||
for (GroupedRawData groupedRawData: classificationBuffer2) {
|
||||
for (PamDataUnit groupedRawData: classificationBuffer2) {
|
||||
if (groupedRawData.getTimeMilliseconds()<min) {
|
||||
min=groupedRawData.getTimeMilliseconds();
|
||||
}
|
||||
@ -299,18 +334,7 @@ public class DLClassifyProcess extends PamInstantProcess {
|
||||
return true;
|
||||
}
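isRawClassificationBufferFull(...) reports the buffer as full when the incoming unit belongs to a different parent detection, or when the buffered units span more than a maximum time, measured from the earliest unit in case they arrive out of order. A self-contained sketch of those two checks, with an assumed time limit:

import java.util.List;

class BufferFullSketch {
    static final long MAX_BUFFER_MILLIS = 5000; // assumed limit, for illustration only

    static class Unit {
        long timeMillis;
        Object parent;
        Unit(long timeMillis, Object parent) { this.timeMillis = timeMillis; this.parent = parent; }
    }

    static boolean isBufferFull(List<Unit> buffer, Unit next) {
        if (buffer.isEmpty()) return false;

        // 1) a new parent detection (e.g. a new click) means the current batch is complete
        Unit last = buffer.get(buffer.size() - 1);
        if (last.parent != null && last.parent != next.parent) return true;

        // 2) the batch has grown past the maximum time span; use the earliest start time
        long earliest = Long.MAX_VALUE;
        for (Unit u : buffer) earliest = Math.min(earliest, u.timeMillis);
        return next.timeMillis - earliest > MAX_BUFFER_MILLIS;
    }
}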
|
||||
|
||||
/**
|
||||
* Create a data unit from a model result. This is called whenever data passes a prediction threshold.
|
||||
*
|
||||
* @param modelResult - the model result.
|
||||
* @param pamRawData - the raw data unit which the model result came from.
|
||||
*/
|
||||
public void newModelResult(PredictionResult modelResult, GroupedRawData pamRawData) {
|
||||
|
||||
//the model result may be null if the classifier uses a new thread.
|
||||
|
||||
//System.out.println("New segment: parent UID: " + pamRawData.getParentDataUnit().getUID() + " Prediciton: " + modelResult.getPrediction()[0]+ " " + getSourceParams().countChannelGroups());
|
||||
|
||||
private DLDataUnit predictionToDataUnit(PamDataUnit pamRawData, PredictionResult modelResult) {
|
||||
//create a new data unit - always add to the model result section.
|
||||
DLDataUnit dlDataUnit = new DLDataUnit(pamRawData.getTimeMilliseconds(), pamRawData.getChannelBitmap(),
|
||||
pamRawData.getStartSample(), pamRawData.getSampleDuration(), modelResult);
|
||||
@ -319,17 +343,35 @@ public class DLClassifyProcess extends PamInstantProcess {
|
||||
dlDataUnit.setFrequency(new double[] {0, dlControl.getDLClassifyProcess().getSampleRate()/2});
|
||||
dlDataUnit.setDurationInMilliseconds(pamRawData.getDurationInMilliseconds());
|
||||
|
||||
return dlDataUnit;
|
||||
}
|
||||
|
||||
/**
|
||||
* Create a data unit from a model result. This is called whenever data passes a prediction threshold.
|
||||
*
|
||||
* @param modelResult - the model result.
|
||||
* @param pamRawData - the raw data unit which the model result came from.
|
||||
*/
|
||||
public void newRawModelResult(PredictionResult modelResult, GroupedRawData pamRawData) {
|
||||
|
||||
//the model result may be null if the classifier uses a new thread.
|
||||
|
||||
//System.out.println("New segment: parent UID: " + pamRawData.getParentDataUnit().getUID() + " Prediciton: " + modelResult.getPrediction()[0]+ " " + getSourceParams().countChannelGroups());
|
||||
|
||||
//create a new data unit - always add to the model result section.
|
||||
DLDataUnit dlDataUnit = predictionToDataUnit(pamRawData, modelResult);
|
||||
|
||||
this.dlModelResultDataBlock.addPamData(dlDataUnit); //here
|
||||
|
||||
//need to implement multiple groups.
|
||||
for (int i=0; i<getSourceParams().countChannelGroups(); i++) {
|
||||
|
||||
// System.out.println("RawDataIn: chan: " + pamRawData.getChannelBitmap()+ " " +
|
||||
// PamUtils.hasChannel(getSourceParams().getGroupChannels(i), pamRawData.getChannelBitmap()) +
|
||||
// " grouped source: " +getSourceParams().getGroupChannels(i) + " Channels OK? "
|
||||
// +PamUtils.hasChannel(getSourceParams().getGroupChannels(i), PamUtils.getSingleChannel(pamRawData.getChannelBitmap()))
|
||||
// + " groupchan: " + getSourceParams().getGroupChannels(i) + " " + PamUtils.getLowestChannel(pamRawData.getChannelBitmap())
|
||||
// + " chan bitmap: " + pamRawData.getChannelBitmap());
|
||||
// System.out.println("RawDataIn: chan: " + pamRawData.getChannelBitmap()+ " " +
|
||||
// PamUtils.hasChannel(getSourceParams().getGroupChannels(i), pamRawData.getChannelBitmap()) +
|
||||
// " grouped source: " +getSourceParams().getGroupChannels(i) + " Channels OK? "
|
||||
// +PamUtils.hasChannel(getSourceParams().getGroupChannels(i), PamUtils.getSingleChannel(pamRawData.getChannelBitmap()))
|
||||
// + " groupchan: " + getSourceParams().getGroupChannels(i) + " " + PamUtils.getLowestChannel(pamRawData.getChannelBitmap())
|
||||
// + " chan bitmap: " + pamRawData.getChannelBitmap());
|
||||
|
||||
if (PamUtils.hasChannel(getSourceParams().getGroupChannels(i), PamUtils.getLowestChannel(pamRawData.getChannelBitmap()))) {
|
||||
|
||||
@ -375,7 +417,7 @@ public class DLClassifyProcess extends PamInstantProcess {
|
||||
//Need to go by the parent data unit for merging data not the segments. Note that we may still add multiple
|
||||
//predictions to a single data unit depending on how many segments it contains.
|
||||
|
||||
//System.out.println("New model data " + pamRawData.getParentDataUnit().getUID() + " " + groupDataBuffer[i].size() + " " + modelResultDataBuffer[i].size());
|
||||
//System.out.println("New model data " + pamRawData.getParentDataUnit().getUID() + " " + groupDataBuffer[i].size() + " " + modelResultDataBuffer[i].size());
|
||||
|
||||
if (pamRawData.getParentDataUnit()!=lastParentDataUnit[i]) {
|
||||
//save any data
|
||||
@ -422,25 +464,25 @@ public class DLClassifyProcess extends PamInstantProcess {
|
||||
*
|
||||
*/
|
||||
public void forceRunClassifier(PamDataUnit dataUnit) {
|
||||
|
||||
|
||||
|
||||
|
||||
//first call run model to clear out the classification buffer if needs be
|
||||
runModel();
|
||||
runRawModel();
|
||||
classificationBuffer.clear();
|
||||
|
||||
|
||||
//need to implement multiple groups.
|
||||
for (int i=0; i<getSourceParams().countChannelGroups(); i++) {
|
||||
|
||||
|
||||
// System.out.println("Nummber segments " + groupDataBuffer[i].size() + " data unit len: " + dataUnit.getSampleDurationAsInt() + " samples UID: " + dataUnit.getUID());
|
||||
// System.out.println("RawDataIn: chan: " + dataUnit.getChannelBitmap()+ " " +
|
||||
// PamUtils.hasChannel(getSourceParams().getGroupChannels(i), dataUnit.getChannelBitmap()) +
|
||||
// " grouped source: " +getSourceParams().getGroupChannels(i));
|
||||
|
||||
|
||||
|
||||
|
||||
// System.out.println("Nummber segments " + groupDataBuffer[i].size() + " data unit len: " + dataUnit.getSampleDurationAsInt() + " samples UID: " + dataUnit.getUID());
|
||||
// System.out.println("RawDataIn: chan: " + dataUnit.getChannelBitmap()+ " " +
|
||||
// PamUtils.hasChannel(getSourceParams().getGroupChannels(i), dataUnit.getChannelBitmap()) +
|
||||
// " grouped source: " +getSourceParams().getGroupChannels(i));
|
||||
|
||||
|
||||
if (PamUtils.hasChannel(getSourceParams().getGroupChannels(i), PamUtils.getSingleChannel(dataUnit.getChannelBitmap()))) {
|
||||
if (groupDataBuffer[i].size()>0) {
|
||||
//System.out.println("Save click annotation to " + lastParentDataUnit[i].getUID());
|
||||
//System.out.println("Save click annotation to " + lastParentDataUnit[i].getUID());
|
||||
addDLAnnotation(dataUnit,groupDataBuffer[i],modelResultDataBuffer[i]);
|
||||
lastParentDataUnit[i]=null;
|
||||
clearBuffer(i);
|
||||
@ -532,15 +574,22 @@ public class DLClassifyProcess extends PamInstantProcess {
|
||||
|
||||
@Override
|
||||
public void pamStart() {
|
||||
// TODO Auto-generated method stub
|
||||
System.out.println("PREP MODEL:");
|
||||
// System.out.println("PREP MODEL:");
|
||||
this.dlControl.getDLModel().prepModel();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void pamStop() {
|
||||
runModel(); //make sure to run the last data in the buffer.
|
||||
|
||||
//make sure to run the last data in the buffer.
|
||||
if (this.classificationBuffer.size()>0) {
|
||||
if (classificationBuffer.get(0) instanceof GroupedRawData) {
|
||||
runRawModel(); //raw data or raw data units
|
||||
}
|
||||
if (classificationBuffer.get(0) instanceof SegmenterDetectionGroup) {
|
||||
runDetectionGroupModel(); //any other data units.
|
||||
}
|
||||
}
|
||||
|
||||
//21/11/2022 - it seems like this causes a memory leak when models are reopened and closed every file...
|
||||
//this.dlControl.getDLModel().closeModel();
|
||||
}
|
||||
|
@ -106,7 +106,7 @@ public abstract class StandardClassifierModel implements DLClassiferModel, PamSe
|
||||
|
||||
@Override
|
||||
public void prepModel() {
|
||||
// System.out.println("STANDARD CLASSIFIER MODEL PREP MODEL! !!!");
|
||||
System.out.println("STANDARD CLASSIFIER MODEL PREP MODEL! !!!: " + getDLParams().modelPath);
|
||||
// StandardModelParams oldParams = getDLParams().clone();
|
||||
|
||||
getDLWorker().prepModel(getDLParams(), dlControl);
|
||||
@ -115,6 +115,7 @@ public abstract class StandardClassifierModel implements DLClassiferModel, PamSe
|
||||
if (getDLWorker().isModelNull()) {
|
||||
dlClassifierWarning.setWarningMessage("There is no loaded " + getName() + " classifier model. " + getName() + " disabled.");
|
||||
WarningSystem.getWarningSystem().addWarning(dlClassifierWarning);
|
||||
return;
|
||||
}
|
||||
|
||||
|
||||
@ -187,7 +188,7 @@ public abstract class StandardClassifierModel implements DLClassiferModel, PamSe
|
||||
public DLStatus setModel(URI uri) {
|
||||
//will change the params if we do not clone.
|
||||
StandardModelParams.setModel(uri, this.getDLParams());
|
||||
this.prepModel();
|
||||
this.getDLWorker().prepModel(getDLParams(), dlControl);
|
||||
return getModelStatus();
|
||||
}
|
||||
|
||||
@ -271,7 +272,7 @@ public abstract class StandardClassifierModel implements DLClassiferModel, PamSe
|
||||
*/
|
||||
protected void newResult(StandardPrediction modelResult, PamDataUnit groupedRawData) {
|
||||
if (groupedRawData instanceof GroupedRawData) {
|
||||
this.dlControl.getDLClassifyProcess().newModelResult(modelResult, (GroupedRawData) groupedRawData);
|
||||
this.dlControl.getDLClassifyProcess().newRawModelResult(modelResult, (GroupedRawData) groupedRawData);
|
||||
}
|
||||
}
|
||||
//
|
||||
|
@ -32,6 +32,7 @@ import pamViewFX.fxNodes.PamSpinner;
|
||||
import pamViewFX.fxNodes.PamVBox;
|
||||
import pamViewFX.validator.PamValidator;
|
||||
import rawDeepLearningClassifier.dlClassification.DLClassiferModel;
|
||||
import rawDeepLearningClassifier.dlClassification.StandardClassifierModel;
|
||||
|
||||
/**
|
||||
* Settings pane for SoundSpot
|
||||
@ -163,7 +164,7 @@ public abstract class StandardModelPane extends SettingsPane<StandardModelParams
|
||||
defaultSegmentLenChanged();
|
||||
//only set the hop if the user physically changes the toggle switch. This is not included in defaultSegmentLenChanged
|
||||
//because defaultSegmentLenChanged can be called from elsewhere
|
||||
int defaultsamples = getDefaultSamples();
|
||||
int defaultsamples = getDefaultSamples(dlClassifierModel, paramsClone);
|
||||
dlClassifierModel.getDLControl().getSettingsPane().getHopLenSpinner().getValueFactory().setValue((int) defaultsamples/2);
|
||||
});
|
||||
usedefaultSeg.setPadding(new Insets(0,0,0,0));
|
||||
@ -269,7 +270,7 @@ public abstract class StandardModelPane extends SettingsPane<StandardModelParams
|
||||
|
||||
// float sR = dlClassifierModel.getDLControl().getSettingsPane().getSelectedParentDataBlock().getSampleRate();
|
||||
|
||||
int defaultsamples = getDefaultSamples();
|
||||
int defaultsamples = getDefaultSamples(dlClassifierModel, paramsClone);
|
||||
|
||||
//work out the window length in samples
|
||||
dlClassifierModel.getDLControl().getSettingsPane().getSegmentLenSpinner().getValueFactory().setValue(defaultsamples);
|
||||
@ -282,7 +283,7 @@ public abstract class StandardModelPane extends SettingsPane<StandardModelParams
|
||||
}
|
||||
}
|
||||
|
||||
private int getDefaultSamples() {
|
||||
public static int getDefaultSamples(DLClassiferModel dlClassifierModel, StandardModelParams paramsClone) {
|
||||
float sR = dlClassifierModel.getDLControl().getSettingsPane().getSelectedParentDataBlock().getSampleRate();
|
||||
int defaultsamples = (int) (paramsClone.defaultSegmentLen.doubleValue()*sR/1000.0);
|
||||
return defaultsamples;
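The refactored getDefaultSamples(...) converts the model's default segment length in milliseconds into a sample count at the parent data block's sample rate. A worked example with illustrative values:

public class SegmentSamplesExample {
    static int defaultSamples(double segmentLenMillis, float sampleRate) {
        // same conversion as getDefaultSamples: milliseconds times samples per millisecond
        return (int) (segmentLenMillis * sampleRate / 1000.0);
    }

    public static void main(String[] args) {
        System.out.println(defaultSamples(3000.0, 48000f)); // 3 s at 48 kHz -> 144000 samples
    }
}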
|
||||
|
@ -58,10 +58,11 @@ public class ArchiveModelWorker extends GenericModelWorker {
|
||||
}
|
||||
|
||||
/**
|
||||
* Prepare the model
|
||||
* Prepare the model.
|
||||
* Note it is important to put a synchronized here or the model loading can fail.
|
||||
*/
|
||||
@Override
|
||||
public void prepModel(StandardModelParams dlParams, DLControl dlControl) {
|
||||
public synchronized void prepModel(StandardModelParams dlParams, DLControl dlControl) {
|
||||
//ClassLoader origCL = Thread.currentThread().getContextClassLoader();
|
||||
try {
|
||||
|
||||
@ -198,6 +199,7 @@ public class ArchiveModelWorker extends GenericModelWorker {
|
||||
* @throws IOException
|
||||
*/
|
||||
public ArchiveModel loadModel(String currentPath2) throws MalformedModelException, IOException {
|
||||
|
||||
return new SimpleArchiveModel(new File(currentPath2));
|
||||
}
|
||||
|
||||
|
@ -8,6 +8,7 @@ import org.jamdev.jdl4pam.transforms.DLTransformsFactory;
|
||||
import org.jamdev.jdl4pam.transforms.DLTransfromParams;
|
||||
|
||||
import PamController.PamControlledUnitSettings;
|
||||
import PamController.PamSettingManager;
|
||||
import rawDeepLearningClassifier.DLControl;
|
||||
import rawDeepLearningClassifier.dlClassification.DLClassiferModel;
|
||||
import rawDeepLearningClassifier.dlClassification.StandardClassifierModel;
|
||||
@ -37,6 +38,9 @@ public class DelphinIDClassifier extends StandardClassifierModel {
|
||||
|
||||
public DelphinIDClassifier(DLControl dlControl) {
|
||||
super(dlControl);
|
||||
|
||||
//load the previous settings
|
||||
PamSettingManager.getInstance().registerSettings(this);
|
||||
}
|
||||
|
||||
@Override
|
||||
@ -91,7 +95,7 @@ public class DelphinIDClassifier extends StandardClassifierModel {
|
||||
@Override
|
||||
public boolean isDecision(StandardPrediction modelResult, StandardModelParams modelParmas) {
|
||||
//TODO
|
||||
//DelphinID uses a different decision making process to most of the standard classifiers which just pass a binary threhsoild.
|
||||
//DelphinID uses a different decision making process to most of the standard classifiers which just pass a binary threshold.
|
||||
return false;
|
||||
}
|
||||
|
||||
@ -105,10 +109,11 @@ public class DelphinIDClassifier extends StandardClassifierModel {
|
||||
|
||||
@Override
|
||||
public boolean restoreSettings(PamControlledUnitSettings pamControlledUnitSettings) {
|
||||
|
||||
DelphinIDParams newParameters = (DelphinIDParams) pamControlledUnitSettings.getSettings();
|
||||
if (newParameters!=null) {
|
||||
delphinIDParams = (DelphinIDParams) newParameters.clone();
|
||||
//System.out.println("SoundSpot have been restored. : " + soundSpotParmas.classNames);
|
||||
// System.out.println("DELPHINID have been restored. : " + delphinIDParams.modelPath);
|
||||
if (delphinIDParams.dlTransfromParams!=null) {
|
||||
delphinIDParams.dlTransfroms = DLTransformsFactory.makeDLTransforms((ArrayList<DLTransfromParams>) delphinIDParams.dlTransfromParams);
|
||||
}
|
||||
|
@ -1,13 +1,26 @@
|
||||
package rawDeepLearningClassifier.dlClassification.delphinID;
|
||||
|
||||
|
||||
import java.io.File;
|
||||
|
||||
import PamController.SettingsPane;
|
||||
import javafx.geometry.Pos;
|
||||
import javafx.scene.Node;
|
||||
import javafx.scene.control.ContentDisplay;
|
||||
import javafx.scene.control.Label;
|
||||
import javafx.scene.control.Slider;
|
||||
import javafx.scene.control.Spinner;
|
||||
import javafx.scene.control.Tooltip;
|
||||
import javafx.scene.layout.Pane;
|
||||
import javafx.scene.paint.Color;
|
||||
import javafx.scene.text.Font;
|
||||
import javafx.scene.text.FontWeight;
|
||||
import pamViewFX.PamGuiManagerFX;
|
||||
import pamViewFX.fxGlyphs.PamGlyphDude;
|
||||
import pamViewFX.fxNodes.PamHBox;
|
||||
import pamViewFX.fxNodes.PamSpinner;
|
||||
import pamViewFX.fxNodes.PamVBox;
|
||||
import rawDeepLearningClassifier.dlClassification.animalSpot.StandardModelPane;
|
||||
|
||||
/**
|
||||
* Settings pane for delphin ID.
|
||||
@ -15,60 +28,141 @@ import pamViewFX.fxNodes.PamVBox;
|
||||
* @author Jamie Macaulay
|
||||
*
|
||||
*/
|
||||
public class DelphinIDPane extends SettingsPane<DelphinIDParams> {
|
||||
|
||||
public class DelphinIDPane extends SettingsPane<DelphinIDParams> {
|
||||
|
||||
/**
|
||||
* The main pane.
|
||||
*/
|
||||
private Pane mainPane;
|
||||
|
||||
|
||||
/**
|
||||
* Reference to the delphinID classifier
|
||||
*/
|
||||
private DelphinIDClassifier delphinUIClassifier;
|
||||
|
||||
private PamSpinner<Double> detectionDensitySpinner;
|
||||
|
||||
private Slider decisionSlider;
|
||||
|
||||
private DelphinIDParams currentParams;
|
||||
|
||||
private File currentSelectedFile;
|
||||
|
||||
public DelphinIDPane(DelphinIDClassifier delphinUIClassifier) {
|
||||
super(null);
|
||||
this.delphinUIClassifier = delphinUIClassifier;
|
||||
mainPane = createPane();
|
||||
}
|
||||
|
||||
|
||||
private Pane createPane() {
|
||||
|
||||
|
||||
//font to use for title labels.
|
||||
Font font= Font.font(null, FontWeight.BOLD, 11);
|
||||
|
||||
Node classifierIcon;
|
||||
classifierIcon = delphinUIClassifier.getModelUI().getIcon();
|
||||
|
||||
|
||||
Label classifierIcon;
|
||||
classifierIcon = new Label("DelphinID");
|
||||
PamGuiManagerFX.titleFont2style(classifierIcon);
|
||||
//todo - will need to figure out colour of icon using CSS.
|
||||
Node icon = PamGlyphDude.createPamIcon("mdi2r-rss", Color.BLACK, PamGuiManagerFX.iconSize);
|
||||
icon.getStyleClass().add(getName());
|
||||
icon.setRotate(45);
|
||||
classifierIcon.setGraphic(icon);
|
||||
classifierIcon.setContentDisplay(ContentDisplay.RIGHT);
|
||||
|
||||
|
||||
// String settings = currentParams.toString();
|
||||
// classifierIcon.setTooltip(new Tooltip(settings));
|
||||
|
||||
PamVBox vBox = new PamVBox();
|
||||
vBox.setSpacing(5.);
|
||||
|
||||
|
||||
/**Classification thresholds etc to set.**/
|
||||
Label detectionDensity = new Label("Detection Density");
|
||||
detectionDensity.setFont(font);
|
||||
String tooltip = "Set the minimum detection density to attempt to classify.";
|
||||
detectionDensity.setTooltip(new Tooltip(tooltip));
|
||||
detectionDensitySpinner = new PamSpinner<Double>(0.0, 1.0, 0.3, 0.1);
|
||||
detectionDensitySpinner.setPrefWidth(70);
|
||||
detectionDensitySpinner.setEditable(true);
|
||||
detectionDensitySpinner.getStyleClass().add(Spinner.STYLE_CLASS_SPLIT_ARROWS_HORIZONTAL);
|
||||
|
||||
PamHBox minDensityHolder = new PamHBox();
|
||||
minDensityHolder.setAlignment(Pos.CENTER_RIGHT);
|
||||
minDensityHolder.setSpacing(5);
|
||||
Label minDensity = new Label("Min. density");
|
||||
minDensityHolder.getChildren().addAll(minDensity, detectionDensitySpinner);
|
||||
|
||||
/**Classification thresholds etc to set.**/
|
||||
Label classiferInfoLabel2 = new Label("Decision Threshold");
|
||||
classiferInfoLabel2.setTooltip(new Tooltip("Set the minimum prediction value for selected classes. If a prediction exceeds this value "
|
||||
+ "a detection will be saved."));
|
||||
classiferInfoLabel2.setFont(font);
|
||||
|
||||
|
||||
vBox.getChildren().addAll(classifierIcon, classiferInfoLabel2);
|
||||
|
||||
|
||||
decisionSlider = new Slider();
|
||||
decisionSlider.setMin(0);
|
||||
decisionSlider.setMax(1);
|
||||
decisionSlider.setMajorTickUnit(0.2);
|
||||
decisionSlider.setMinorTickCount(10);
|
||||
decisionSlider.valueProperty().addListener((obsVal, oldVal, newVal)->{
|
||||
classiferInfoLabel2.setText(String.format("Decision Threshold %.2f", newVal));
|
||||
});
|
||||
decisionSlider.setShowTickMarks(true);
|
||||
decisionSlider.setShowTickLabels(true);
|
||||
|
||||
vBox.getChildren().addAll(classifierIcon, detectionDensity, minDensityHolder, classiferInfoLabel2, decisionSlider);
|
||||
|
||||
return vBox;
|
||||
}
|
||||
|
||||
@Override
|
||||
public DelphinIDParams getParams(DelphinIDParams currParams) {
|
||||
// TODO Auto-generated method stub
|
||||
return null;
|
||||
currParams.threshold = decisionSlider.getValue();
|
||||
currParams.minDetectionDensity = detectionDensitySpinner.getValue();
|
||||
return currParams;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void setParams(DelphinIDParams input) {
|
||||
// TODO Auto-generated method stub
|
||||
this.currentParams = input;
|
||||
decisionSlider.setValue(input.threshold);
|
||||
detectionDensitySpinner.getValueFactory().setValue(input.minDetectionDensity);
|
||||
|
||||
if (input.modelPath!=null) {
|
||||
//this might
|
||||
currentSelectedFile = new File(currentParams.modelPath);
|
||||
|
||||
//this might change the paramsClone values if the model contains pamguard compatible metadata
|
||||
newModelSelected(currentSelectedFile);
|
||||
}
|
||||
}
|
||||
|
||||
private void newModelSelected(File currentSelectedFile2) {
|
||||
if (currentParams!=null && currentParams.defaultSegmentLen != null) {
|
||||
|
||||
//System.out.println("Defualt segment length: " + paramsClone.defaultSegmentLen);
|
||||
|
||||
//cannot use because, if the parent datablock has changed, samplerate will be out of date.
|
||||
// int defaultsamples = (int) this.soundSpotClassifier.millis2Samples(paramsClone.defaultSegmentLen);
|
||||
|
||||
|
||||
// float sR = dlClassifierModel.getDLControl().getSettingsPane().getSelectedParentDataBlock().getSampleRate();
|
||||
|
||||
int defaultsamples = StandardModelPane.getDefaultSamples(delphinUIClassifier, currentParams);
|
||||
|
||||
//work out the window length in samples
|
||||
delphinUIClassifier.getDLControl().getSettingsPane().getSegmentLenSpinner().getValueFactory().setValue(defaultsamples);
|
||||
// dlClassifierModel.getDLControl().getSettingsPane().getHopLenSpinner().getValueFactory().setValue((int) defaultsamples/2);
|
||||
|
||||
delphinUIClassifier.getDLControl().getSettingsPane().getSegmentLenSpinner().setDisable(true);
|
||||
}
|
||||
else {
|
||||
delphinUIClassifier.getDLControl().getSettingsPane().getSegmentLenSpinner().setDisable(false);
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
|
||||
|
||||
@Override
|
||||
public String getName() {
|
||||
return "delphinIDParams";
|
||||
@ -82,7 +176,7 @@ public class DelphinIDPane extends SettingsPane<DelphinIDParams> {
|
||||
@Override
|
||||
public void paneInitialized() {
|
||||
// TODO Auto-generated method stub
|
||||
|
||||
|
||||
}
|
||||
|
||||
}
|
||||
|
@ -8,5 +8,11 @@ public class DelphinIDParams extends StandardModelParams {
|
||||
*
|
||||
*/
|
||||
private static final long serialVersionUID = 1L;
|
||||
|
||||
/**
|
||||
* The minimum detection density.
|
||||
*/
|
||||
public double minDetectionDensity = 0.3;
|
||||
|
||||
|
||||
}
|
||||
|
@ -2,6 +2,7 @@ package rawDeepLearningClassifier.dlClassification.delphinID;
|
||||
|
||||
|
||||
import java.io.File;
|
||||
import java.io.IOException;
|
||||
import java.util.ArrayList;
|
||||
|
||||
import org.jamdev.jdl4pam.transforms.DLTransform;
|
||||
@ -9,18 +10,23 @@ import org.jamdev.jdl4pam.transforms.DLTransfromParams;
|
||||
import org.jamdev.jdl4pam.transforms.FreqTransform;
|
||||
import org.jamdev.jdl4pam.transforms.DLTransform.DLTransformType;
|
||||
import org.jamdev.jdl4pam.transforms.jsonfile.DLTransformsParser;
|
||||
import org.jamdev.jdl4pam.utils.DLMatFile;
|
||||
import org.jamdev.jdl4pam.utils.DLUtils;
|
||||
import org.json.JSONArray;
|
||||
import org.json.JSONObject;
|
||||
|
||||
import PamUtils.PamArrayUtils;
|
||||
import PamguardMVC.PamDataUnit;
|
||||
import ai.djl.Model;
|
||||
import rawDeepLearningClassifier.DLControl;
|
||||
import rawDeepLearningClassifier.dlClassification.animalSpot.StandardModelParams;
|
||||
import rawDeepLearningClassifier.dlClassification.archiveModel.ArchiveModelWorker;
|
||||
import rawDeepLearningClassifier.dlClassification.delphinID.Whistles2Image.Whistle2ImageParams;
|
||||
import rawDeepLearningClassifier.segmenter.GroupedRawData;
|
||||
import rawDeepLearningClassifier.segmenter.SegmenterDetectionGroup;
|
||||
import us.hebi.matlab.mat.format.Mat5;
|
||||
import us.hebi.matlab.mat.types.MatFile;
|
||||
import us.hebi.matlab.mat.types.Matrix;
|
||||
import us.hebi.matlab.mat.types.Struct;
|
||||
|
||||
/**
|
||||
*
|
||||
@ -48,6 +54,11 @@ public class DelphinIDWorker extends ArchiveModelWorker {
|
||||
System.err.println("Error: could not find whistle2image transform in DelphinID JSON file. Model will not work.");
|
||||
this.setModel(null); // set model to null to make sure nothing works and errors are thrown
|
||||
}
|
||||
|
||||
dlParams.binaryClassification = new boolean[dlParams.numClasses];
|
||||
for (int i=0; i<dlParams.numClasses; i++) {
|
||||
dlParams.binaryClassification[i]=true;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@ -81,11 +92,50 @@ public class DelphinIDWorker extends ArchiveModelWorker {
|
||||
|
||||
return whistle2ImageParmas;
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
//something has gone wrong if we get here.
|
||||
return null;
|
||||
}
|
||||
|
||||
|
||||
|
||||
private Struct imageStruct;
|
||||
int count = 0;
|
||||
/**
|
||||
* Test by exporting results to a .mat file.
|
||||
* @param data
|
||||
* @param aSegment
|
||||
*/
|
||||
private void addIMage2MatFile(double[][] data, SegmenterDetectionGroup aSegment) {
|
||||
long dataStartMillis = 1340212413000L;
|
||||
|
||||
if (imageStruct==null) {
|
||||
imageStruct = Mat5.newStruct(100,1);
|
||||
}
|
||||
Matrix image = DLMatFile.array2Matrix(data);
|
||||
imageStruct.set("image", count, image);
|
||||
imageStruct.set("startmillis", count, Mat5.newScalar(aSegment.getSegmentStartMillis()));
|
||||
imageStruct.set("startseconds", count, Mat5.newScalar((aSegment.getSegmentStartMillis()-dataStartMillis)/1000.));
|
||||
|
||||
count++;
|
||||
|
||||
System.out.println("SAVED " +count + " TO MAT FILE");
|
||||
|
||||
if (count==10) {
|
||||
//create MatFile for saving the image data to.
|
||||
MatFile matFile = Mat5.newMatFile();
|
||||
matFile.addArray("whistle_images", imageStruct);
|
||||
//the path to the model
|
||||
String matImageSave = "C:/Users/Jamie Macaulay/MATLAB Drive/MATLAB/PAMGUARD/deep_learning/delphinID/whistleimages_pg.mat";
|
||||
try {
|
||||
Mat5.writeToFile(matFile,matImageSave);
|
||||
} catch (IOException e) {
|
||||
// TODO Auto-generated catch block
|
||||
e.printStackTrace();
|
||||
}
|
||||
}
|
||||
}
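The test export above writes the whistle images and their start times into a MATLAB struct. A stripped-down sketch of the same Mat5 calls, writing a single array with an illustrative file name:

import java.io.IOException;

import org.jamdev.jdl4pam.utils.DLMatFile;

import us.hebi.matlab.mat.format.Mat5;
import us.hebi.matlab.mat.types.MatFile;

public class MatExportSketch {
    public static void main(String[] args) throws IOException {
        double[][] image = new double[64][48]; // placeholder image data

        MatFile matFile = Mat5.newMatFile();
        matFile.addArray("whistle_image", DLMatFile.array2Matrix(image));
        Mat5.writeToFile(matFile, "whistle_image_example.mat"); // assumed output path
    }
}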
|
||||
|
||||
|
||||
@Override
|
||||
@ -105,6 +155,7 @@ public class DelphinIDWorker extends ArchiveModelWorker {
|
||||
double[][] transformedData2; //spectrogram data
|
||||
for (int j=0; j<numChunks; j++) {
|
||||
|
||||
// System.out.println("Number of whistle to process: " + whistleGroups.get(j).getStartSecond() + "s " + whistleGroups.get(j).getSubDetectionsCount() + " " + whistleGroups.get(j).getSegmentStartMillis());
|
||||
//create the first transform and set the whistle data. Note that the absolute time limits are
|
||||
//contained within the SegmenterDetectionGroup unit.
|
||||
Whistles2Image whistles2Image = new Whistles2Image(whistleGroups.get(j), whistleImageParams);
|
||||
@ -120,6 +171,14 @@ public class DelphinIDWorker extends ArchiveModelWorker {
|
||||
|
||||
transformedData2 = ((FreqTransform) transform).getSpecTransfrom().getTransformedData();
|
||||
transformedDataStack[j] = DLUtils.toFloatArray(transformedData2);
|
||||
|
||||
// //TEMP
|
||||
// try {
|
||||
// addIMage2MatFile(transformedData2, whistleGroups.get(j));
|
||||
// }
|
||||
// catch (Exception e) {
|
||||
// e.printStackTrace();
|
||||
// }
|
||||
}
|
||||
|
||||
|
||||
|
@ -1,6 +1,8 @@
|
||||
package rawDeepLearningClassifier.dlClassification.delphinID;
|
||||
|
||||
import java.awt.Color;
|
||||
import java.awt.Graphics2D;
|
||||
import java.awt.RenderingHints;
|
||||
import java.awt.image.BufferedImage;
|
||||
import java.awt.image.Raster;
|
||||
import java.util.ArrayList;
|
||||
@ -72,7 +74,7 @@ public class Whistles2Image extends FreqTransform {
|
||||
for (int i=0; i<imaged.length; i++) {
|
||||
for (int j=0; j<imaged[0].length; j++) {
|
||||
color = raster.getPixel(i, j, color);
|
||||
imaged[i][j] = color[0]/255.; //normalize
|
||||
imaged[i][j] = (255-color[0])/255.; //normalize
|
||||
}
|
||||
}
|
||||
//
|
||||
@ -116,16 +118,19 @@ public class Whistles2Image extends FreqTransform {
|
||||
// }
|
||||
//
|
||||
// }
|
||||
// System.out.println("Whistle group: " + segStart);
|
||||
|
||||
for (int i=0; i<whistleGroup.getSubDetectionsCount(); i++) {
|
||||
|
||||
whistleContour = (AbstractWhistleDataUnit) whistleGroup.getSubDetection(i);
|
||||
// System.out.println("Whistle start time: " + (segStart - whistleContour.getTimeMilliseconds())/1000. + " end: " + (segStart - whistleContour.getTimeMilliseconds() + whistleContour.getDurationInMilliseconds())/1000.);
|
||||
|
||||
|
||||
|
||||
// System.out.println("Whistle start time: " + (whistleContour.getTimeMilliseconds()-segStart)/1000. + " end: " +
|
||||
// (whistleContour.getTimeMilliseconds() - (segStart + whistleContour.getDurationInMilliseconds()))/1000.
|
||||
// + " millis: " + whistleContour.getTimeMilliseconds() + " first slice: " + whistleContour.getTimesInSeconds()[0]);
|
||||
|
||||
double[][] contourD = new double[whistleContour.getSliceCount()][2];
|
||||
for (int j=0; j<whistleContour.getSliceCount(); j++) {
|
||||
contourD[j][0] = (whistleContour.getTimeMilliseconds()-segStart)/1000. + whistleContour.getTimesInSeconds()[j];
|
||||
contourD[j][0] = (whistleContour.getTimeMilliseconds()-segStart)/1000. + (whistleContour.getTimesInSeconds()[j]-whistleContour.getTimesInSeconds()[0]);
|
||||
contourD[j][1] = whistleContour.getFreqsHz()[j];
|
||||
}
|
||||
contours.add(contourD);
|
||||
@ -173,7 +178,7 @@ public class Whistles2Image extends FreqTransform {
|
||||
* @param markerSize - the marker size in pixels
|
||||
* @return an image with y axis as frequency and x axis as time.
|
||||
*/
|
||||
private BufferedImage makeScatterImage(ArrayList<double[][]> points, double[] size, double[] xlims, double[] ylims, double markerSize) {
|
||||
public static BufferedImage makeScatterImage(ArrayList<double[][]> points, double[] size, double[] xlims, double[] ylims, double markerSize) {
|
||||
|
||||
BufferedImage canvas = new BufferedImage((int) size[0], (int) size[1], BufferedImage.TYPE_INT_RGB);
|
||||
|
||||
@ -187,9 +192,13 @@ public class Whistles2Image extends FreqTransform {
|
||||
x = ((points.get(j)[i][0]-xlims[0])/(xlims[1]-xlims[0]))*size[0];
|
||||
y = ((points.get(j)[i][1]-ylims[0])/(ylims[1]-ylims[0]))*size[1];
|
||||
|
||||
// System.out.println("Fill oval: x" + x + " y: " + y + " time: " + points.get(j)[i][0]);
|
||||
// System.out.println("Fill oval: x " + x + " y: " + y + " time: " + points.get(j)[i][0]);
|
||||
|
||||
Graphics2D g2 = (Graphics2D) canvas.getGraphics();
|
||||
|
||||
canvas.getGraphics().fillOval((int) (x+markerSize/2),(int) (y-markerSize/2), (int) markerSize,(int) markerSize);
|
||||
g2.setRenderingHint(RenderingHints.KEY_ANTIALIASING, RenderingHints.VALUE_ANTIALIAS_ON);
|
||||
|
||||
g2.fillOval((int) (x+markerSize/2),(int) (y-markerSize/2), (int) markerSize,(int) markerSize);
|
||||
}
|
||||
}
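The two changes above switch the point drawing to an antialiased Graphics2D and invert the grayscale read-back via (255 - value)/255. A self-contained sketch of the whole round trip using only java.awt: with white markers on a black canvas, a drawn point reads back as 0 and the empty background as 1. Sizes, limits and the sample points are illustrative.

import java.awt.Color;
import java.awt.Graphics2D;
import java.awt.RenderingHints;
import java.awt.image.BufferedImage;
import java.awt.image.Raster;

public class ScatterImageSketch {
    public static void main(String[] args) {
        double[][] points = {{0.5, 5000}, {1.0, 8000}, {1.5, 12000}}; // {seconds, Hz}
        double[] xlims = {0, 4};       // segment length in seconds
        double[] ylims = {0, 20000};   // frequency limits in Hz
        int width = 680, height = 480;
        double markerSize = 5;

        BufferedImage canvas = new BufferedImage(width, height, BufferedImage.TYPE_INT_RGB);
        Graphics2D g2 = canvas.createGraphics();
        g2.setColor(Color.WHITE);
        g2.setRenderingHint(RenderingHints.KEY_ANTIALIASING, RenderingHints.VALUE_ANTIALIAS_ON);
        for (double[] p : points) {
            // map time and frequency onto pixel coordinates (image y runs downwards)
            double x = (p[0] - xlims[0]) / (xlims[1] - xlims[0]) * width;
            double y = (p[1] - ylims[0]) / (ylims[1] - ylims[0]) * height;
            g2.fillOval((int) (x - markerSize / 2), (int) (y - markerSize / 2),
                    (int) markerSize, (int) markerSize);
        }
        g2.dispose();

        // read one pixel back as an inverted, normalised grayscale value
        Raster raster = canvas.getData();
        float[] pixel = raster.getPixel(85, 120, new float[3]);
        System.out.println((255 - pixel[0]) / 255.); // 0.0 at a drawn point, 1.0 on the background
    }
}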
|
||||
|
||||
@ -206,6 +215,7 @@ public class Whistles2Image extends FreqTransform {
|
||||
public double[] size;
|
||||
|
||||
}
|
||||
|
||||
|
||||
|
||||
|
||||
|
@ -185,7 +185,7 @@ public class GenericDLClassifier extends StandardClassifierModel {
|
||||
* @param groupedRawData - the grouped raw data.
|
||||
*/
|
||||
protected void newResult(StandardPrediction modelResult, GroupedRawData groupedRawData) {
|
||||
this.dlControl.getDLClassifyProcess().newModelResult(modelResult, groupedRawData);
|
||||
this.dlControl.getDLClassifyProcess().newRawModelResult(modelResult, groupedRawData);
|
||||
}
|
||||
|
||||
|
||||
|
@ -9,6 +9,7 @@ import org.jamdev.jdl4pam.transforms.FreqTransform;
|
||||
|
||||
import PamModel.PamModel;
|
||||
import PamModel.PamModel.PluginClassloader;
|
||||
import PamUtils.PamArrayUtils;
|
||||
import rawDeepLearningClassifier.DLControl;
|
||||
import rawDeepLearningClassifier.dlClassification.animalSpot.StandardModelParams;
|
||||
|
||||
@ -32,7 +33,7 @@ public class GenericModelWorker extends DLModelWorker<StandardPrediction> {
|
||||
|
||||
@Override
|
||||
public float[] runModel(float[][][] transformedDataStack) {
|
||||
//System.out.println("RUN GENERIC MODEL: " + transformedDataStack.length + " " + transformedDataStack[0].length + " " + transformedDataStack[0][0].length);
|
||||
// System.out.println("RUN GENERIC MODEL: " + transformedDataStack.length + " " + transformedDataStack[0].length + " " + transformedDataStack[0][0].length);
|
||||
// System.out.println("RUN GENERIC MODEL: " + transformedDataStack[0][0][0]);
|
||||
float[] results;
|
||||
if (freqTransform)
|
||||
@ -47,7 +48,8 @@ public class GenericModelWorker extends DLModelWorker<StandardPrediction> {
|
||||
//System.out.println("RUN GENERIC MODEL WAVE: " + waveStack.length + " " + waveStack[0].length + " " + waveStack[0][0]);
|
||||
results = getModel().runModel(waveStack);
|
||||
}
|
||||
//System.out.println("GENERIC MODEL RESULTS: " + results== null ? null : results.length);
|
||||
// System.out.println("GENERIC MODEL RESULTS: " + (results== null ? null : results.length));
|
||||
// PamArrayUtils.printArray(results);
|
||||
return results;
|
||||
}
|
||||
|
||||
|
@ -21,6 +21,8 @@ public class SegmenterDetectionGroup extends GroupDetection<PamDataUnit> {
|
||||
*/
|
||||
private long segMillis;
|
||||
|
||||
private double timeS;
|
||||
|
||||
/**
|
||||
* Constructor for a group of detections within a detection. Note that some
|
||||
* longer detections (e.g. whistles) may have sections outside the segment.
|
||||
@ -28,12 +30,12 @@ public class SegmenterDetectionGroup extends GroupDetection<PamDataUnit> {
|
||||
* @param timeMilliseconds - this is the start of the SEGMENT - Note that the
|
||||
* @param channelBitmap - channels of all detections
|
||||
* @param startSample - the startSample of the SEGMENT.
|
||||
* @param duration - the duration of the SEGMENT.
|
||||
* @param duration - the duration of the SEGMENT in milliseconds.
|
||||
*/
|
||||
public SegmenterDetectionGroup(long timeMilliseconds, int channelBitmap, long startSample, double duration) {
|
||||
super(timeMilliseconds, channelBitmap, startSample, (long) duration);
|
||||
this.setDurationInMilliseconds(duration);
|
||||
this.segMillis =timeMilliseconds;
|
||||
this.segMillis = timeMilliseconds;
|
||||
this.segDuration = duration;
|
||||
}
|
||||
|
||||
@ -52,5 +54,17 @@ public class SegmenterDetectionGroup extends GroupDetection<PamDataUnit> {
|
||||
return segDuration;
|
||||
}
|
||||
|
||||
public long getSegmentEndMillis() {
|
||||
return (long) (segMillis+segDuration);
|
||||
}
|
||||
|
||||
public void setStartSecond(double timeS) {
|
||||
this.timeS = timeS;
|
||||
}
|
||||
|
||||
public double getStartSecond() {
|
||||
return timeS;
|
||||
}
|
||||
|
||||
|
||||
}
|
||||
|
@ -12,6 +12,8 @@ public class SegmenterGroupDataBlock extends PamDataBlock<SegmenterDetectionGrou
|
||||
|
||||
public SegmenterGroupDataBlock(String dataName, PamProcess parentProcess, int channelMap) {
|
||||
super(SegmenterDetectionGroup.class, dataName, parentProcess, channelMap);
|
||||
this.setNaturalLifetimeMillis(15000); //do not want to keep the data for very long - it's raw data segmnents so memory intensive
|
||||
|
||||
}
|
||||
|
||||
|
||||
|
@ -6,6 +6,7 @@ import java.util.Arrays;
|
||||
|
||||
import PamController.PamController;
|
||||
import PamDetection.RawDataUnit;
|
||||
import PamUtils.PamArrayUtils;
|
||||
import PamUtils.PamUtils;
|
||||
import PamView.GroupedSourceParameters;
|
||||
import PamView.PamDetectionOverlayGraphics;
|
||||
@ -60,7 +61,21 @@ public class SegmenterProcess extends PamProcess {
|
||||
/**
|
||||
* Holds groups of data units which are within a defined segment.
|
||||
*/
|
||||
private SegmenterGroupDataBlock segmenterGroupDataBlock;
|
||||
private SegmenterGroupDataBlock segmenterGroupDataBlock;
|
||||
|
||||
/**
|
||||
* The first clock update - segments for detection groups (not raw sound data) are referenced from this.
|
||||
*/
|
||||
private long firstClockUpdate;
|
||||
|
||||
/**
|
||||
* The current segmenter detection group.
|
||||
*/
|
||||
private SegmenterDetectionGroup[] segmenterDetectionGroup = null;
|
||||
|
||||
private long segmentStart=-1;
|
||||
|
||||
private long segmenterEnd=-1;
|
||||
|
||||
|
||||
public SegmenterProcess(DLControl pamControlledUnit, PamDataBlock parentDataBlock) {
|
||||
@ -100,6 +115,8 @@ public class SegmenterProcess extends PamProcess {
|
||||
public void prepareProcess() {
|
||||
setupSegmenter();
|
||||
}
|
||||
|
||||
|
||||
|
||||
/**
|
||||
* A list of data block class types which are compatible as parent data blocks
|
||||
@ -111,7 +128,7 @@ public class SegmenterProcess extends PamProcess {
|
||||
*/
|
||||
@Override
|
||||
public ArrayList getCompatibleDataUnits(){
|
||||
return new ArrayList<Class<? extends PamDataUnit>>(Arrays.asList(RawDataUnit.class, ClickDetection.class, ClipDataUnit.class));
|
||||
return new ArrayList<Class<? extends PamDataUnit>>(Arrays.asList(RawDataUnit.class, ClickDetection.class, ClipDataUnit.class, ConnectedRegionDataUnit.class));
|
||||
}
|
||||
|
||||
|
||||
@ -149,6 +166,7 @@ public class SegmenterProcess extends PamProcess {
|
||||
if (chanGroups!=null) {
|
||||
currentRawChunks = new GroupedRawData[chanGroups.length];
|
||||
nextRawChunks = new GroupedRawData[chanGroups.length][];
|
||||
segmenterDetectionGroup = new SegmenterDetectionGroup[chanGroups.length];
|
||||
}
|
||||
|
||||
|
||||
@ -177,6 +195,8 @@ public class SegmenterProcess extends PamProcess {
|
||||
if (rawDataBlock==null) return;
|
||||
|
||||
setParentDataBlock(rawDataBlock);
|
||||
|
||||
this.firstClockUpdate = -1;
|
||||
|
||||
}
|
||||
|
||||
@ -203,9 +223,10 @@ public class SegmenterProcess extends PamProcess {
|
||||
*/
|
||||
public void newData(PamDataUnit pamRawData) {
|
||||
|
||||
// System.out.println("New data for segmenter: " + pamRawData);
|
||||
|
||||
if (!dlControl.getDLParams().useDataSelector || dlControl.getDataSelector().scoreData(pamRawData)>0) {
|
||||
|
||||
//System.out.println("New data for segmenter: " + pamRawData);
|
||||
if (pamRawData instanceof RawDataUnit) {
|
||||
newRawDataUnit(pamRawData);
|
||||
}
|
||||
@ -224,18 +245,151 @@ public class SegmenterProcess extends PamProcess {
|
||||
|
||||
|
||||
/**
|
||||
* A new whistle data unit.
|
||||
* A new detection data unit, i.e. this is only used if we have detection data which is being grouped into segments.
|
||||
* @param dataUnit - the whistle data unit.
|
||||
*/
|
||||
private void newWhistleData(PamDataUnit dataUnit) {
|
||||
private synchronized void newWhistleData(PamDataUnit dataUnit) {
|
||||
|
||||
|
||||
ConnectedRegionDataUnit whistle = (ConnectedRegionDataUnit) dataUnit;
|
||||
|
||||
//TODO
|
||||
//this contains no raw data so we are branching off on a completely different processing path here.
|
||||
//Whislte data units are saved to a buffer and then fed to the deep learning algorohtm
|
||||
//Whistle data units are saved to a buffer and then fed to the deep learning algorithms
|
||||
|
||||
int[] chanGroups = dlControl.getDLParams().groupedSourceParams.getChannelGroups();
|
||||
|
||||
int index = -1;
|
||||
for (int i=0; i<chanGroups.length; i++) {
|
||||
if (dlControl.getDLParams().groupedSourceParams.getGroupChannels(chanGroups[i])==dataUnit.getChannelBitmap()) {
|
||||
index=i;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
//FIXME - TEMP
|
||||
index =0;
|
||||
|
||||
// System.out.println("Whistle data: " + ((dataUnit.getTimeMilliseconds()-firstClockUpdate)/1000.) + "s " + chanGroups.length + " " + index + " " + dataUnit.getChannelBitmap());
|
||||
// PamArrayUtils.printArray(chanGroups);
|
||||
|
||||
if (index<0) {
|
||||
return;
|
||||
}
|
||||
|
||||
if (segmenterDetectionGroup[index] == null || !detectionInSegment(dataUnit, segmenterDetectionGroup[index])) {
|
||||
|
||||
//System.out.println("Whiste not in segment");
|
||||
//iterate until we find the correct time for this detection. This keeps the segments consistent no matter
|
||||
//the data units. What we do not want is the first data unit defining the start of the first segment.
|
||||
if (segmentStart <0) {
|
||||
segmentStart= firstClockUpdate;
|
||||
segmenterEnd = (long) (segmentStart + getSegmentLenMillis());
|
||||
}
|
||||
|
||||
while(!detectionInSegment(dataUnit, segmentStart, segmenterEnd)) {
|
||||
nextGroupSegment( index);
|
||||
}
|
||||
}
|
||||
|
||||
segmenterDetectionGroup[index].addSubDetection(whistle);
|
||||
// System.out.println("Segment sub detection count: " + segmenterDetectionGroup[index].getSubDetectionsCount());
|
||||
|
||||
}
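newWhistleData(...) aligns incoming detections to a segment grid that is referenced to the first master clock update rather than to the first detection, hopping forward until the detection's segment is reached. A small worked example of that alignment loop with illustrative times in milliseconds:

public class SegmentAlignSketch {
    public static void main(String[] args) {
        long firstClockUpdate = 0;
        double segmentLen = 2000, segmentHop = 1000;

        long detectionTime = 7300; // a whistle arriving 7.3 s after the first clock update
        long segStart = firstClockUpdate;
        long segEnd = (long) (segStart + segmentLen);
        while (!(detectionTime >= segStart && detectionTime < segEnd)) {
            // advance the segment grid by one hop until the detection falls inside it
            segStart = (long) (segStart + segmentHop);
            segEnd = (long) (segStart + segmentLen);
        }
        System.out.println("segment " + segStart + " - " + segEnd + " ms"); // 6000 - 8000 ms
    }
}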
|
||||
|
||||
/**
|
||||
* Iterate to the next group segment
|
||||
* @param index - the group index;
|
||||
*/
|
||||
private void nextGroupSegment(int index) {
|
||||
|
||||
// System.out.println("----------------------------------");
|
||||
|
||||
segmentStart = (long) (segmentStart+ getSegmentHopMillis());
|
||||
segmenterEnd = (long) (segmentStart + getSegmentLenMillis());
|
||||
|
||||
int[] chanGroups = dlControl.getDLParams().groupedSourceParams.getChannelGroups();
|
||||
|
||||
long startSample = this.absMillisecondsToSamples(segmentStart);
|
||||
|
||||
//now we need to create a new data unit.
|
||||
SegmenterDetectionGroup aSegment = new SegmenterDetectionGroup(segmentStart, chanGroups[index], startSample, getSegmentLenMillis());
|
||||
aSegment.setStartSecond((segmentStart-firstClockUpdate)/1000.);
|
||||
|
||||
//save the last segment
|
||||
if (segmenterDetectionGroup[index]!=null) {
|
||||
//add any data units from the previous segment (because segments may overlap);
|
||||
int count =0;
|
||||
for (int i=0; i<segmenterDetectionGroup[index].getSubDetectionsCount() ; i++) {
|
||||
if (detectionInSegment(segmenterDetectionGroup[index].getSubDetection(i), aSegment)){
|
||||
aSegment.addSubDetection(segmenterDetectionGroup[index].getSubDetection(i));
|
||||
count++;
|
||||
}
|
||||
}
|
||||
|
||||
// System.out.println("SAVE WHISTLE SEGMENT!: " + ((segmenterDetectionGroup[index].getSegmentStartMillis()-firstClockUpdate)/1000.) + "s" + " " + " no. whsitles: " + segmenterDetectionGroup[index].getSubDetectionsCount() + " " + segmenterDetectionGroup[index].getSegmentStartMillis() + " " + segmenterDetectionGroup[index]);
|
||||
//save the data unit to the data block
|
||||
if (segmenterDetectionGroup[index].getSubDetectionsCount()>0) {
|
||||
this.segmenterGroupDataBlock.addPamData(segmenterDetectionGroup[index]);
|
||||
}
|
||||
}
|
||||
|
||||
segmenterDetectionGroup[index] = aSegment;
|
||||
// System.out.println("NEW SEGMENT START!: " + (segmentStart-firstClockUpdate)/1000. + "s" + " " + segmenterDetectionGroup[index].getSegmentStartMillis()+ " " +segmenterDetectionGroup[index]);
|
||||
|
||||
}
|
||||
|
||||
private boolean detectionInSegment(PamDataUnit dataUnit, SegmenterDetectionGroup segmenterDetectionGroup2) {
|
||||
return detectionInSegment(dataUnit, segmenterDetectionGroup2.getSegmentStartMillis(),
|
||||
(long) (segmenterDetectionGroup2.getSegmentStartMillis()+segmenterDetectionGroup2.getSegmentDuration()));
|
||||
}
|
||||
|
||||
|
||||
private boolean detectionInSegment(PamDataUnit dataUnit, long segStart, long segEnd) {
|
||||
//TODO - this is going to fail for very small segments.
|
||||
long whistleStart = dataUnit.getTimeMilliseconds();
|
||||
long whistleEnd = whistleStart + dataUnit.getDurationInMilliseconds().longValue();
|
||||
|
||||
if ((whistleStart>=segStart && whistleStart<segEnd) || ((whistleEnd>=segStart && whistleEnd<segEnd))){
|
||||
//some part of the whistle is in the segment.
|
||||
// System.out.println("Whsitle in segment: " + whistleStart + " " + whistleEnd);
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
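detectionInSegment(...) above counts a detection as inside the segment if its start or its end falls within the segment limits, which, as the TODO warns, can miss a long detection that starts before the segment and ends after it (e.g. when segments are very small). A sketch of the standard interval overlap test that would also cover that case; this is an alternative for illustration, not the committed code.

public class OverlapSketch {
    static boolean overlaps(long detStart, long detEnd, long segStart, long segEnd) {
        // two intervals overlap if each starts before the other ends
        return detStart < segEnd && detEnd > segStart;
    }

    public static void main(String[] args) {
        // a long whistle spanning a short segment entirely is still counted
        System.out.println(overlaps(0, 5000, 2000, 2100)); // true
    }
}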
|
||||
|
||||
private double getSegmentLenMillis() {
|
||||
double millis = (dlControl.getDLParams().rawSampleSize/this.getSampleRate())*1000.;
|
||||
return millis;
|
||||
}
|
||||
|
||||
private double getSegmentHopMillis() {
|
||||
double millis = (dlControl.getDLParams().sampleHop/this.getSampleRate())*1000.;
|
||||
return millis;
|
||||
}
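getSegmentLenMillis() and getSegmentHopMillis() convert the configured sample counts into milliseconds at the current sample rate. A worked example with illustrative values showing how the hop produces overlapping segments:

public class SegmentHopExample {
    public static void main(String[] args) {
        float sampleRate = 48000f;
        int rawSampleSize = 96000;  // illustrative segment length in samples
        int sampleHop = 48000;      // illustrative hop in samples

        double segmentLenMillis = rawSampleSize / sampleRate * 1000.; // 2000 ms
        double segmentHopMillis = sampleHop / sampleRate * 1000.;     // 1000 ms

        long segmentStart = 0;
        for (int i = 0; i < 3; i++) {
            long segmentEnd = (long) (segmentStart + segmentLenMillis);
            System.out.println("segment " + i + ": " + segmentStart + " - " + segmentEnd + " ms");
            segmentStart = (long) (segmentStart + segmentHopMillis);
        }
        // prints 0-2000, 1000-3000, 2000-4000: 50% overlapping segments
    }
}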
|
||||
|
||||
|
||||
|
||||
int count=0;
|
||||
public void masterClockUpdate(long milliSeconds, long sampleNumber) {
|
||||
super.masterClockUpdate(milliSeconds, sampleNumber);
|
||||
if (firstClockUpdate<0) {
|
||||
firstClockUpdate = milliSeconds;
|
||||
}
|
||||
|
||||
//want to make sure that a segment is saved if we suddenly lose
|
||||
// a steady stream of data units. This ensures that the segments are saved properly
|
||||
//after the master clock has gone past the end of the current segment.
|
||||
if (segmenterDetectionGroup!=null && count%20==0) {
|
||||
for (int i=0; i<segmenterDetectionGroup.length; i++) {
|
||||
if (segmenterDetectionGroup[i]!=null && segmenterDetectionGroup[i].getSegmentEndMillis()<milliSeconds) {
|
||||
nextGroupSegment(i);
|
||||
}
|
||||
}
|
||||
}
|
||||
count++;
|
||||
}
|
||||
|
||||
|
||||
|
||||
/**
|
||||
@ -347,7 +501,7 @@ public class SegmenterProcess extends PamProcess {
|
||||
|
||||
/**
|
||||
* Take a raw sound chunk of data and segment into discrete groups. This handles
|
||||
* much situations e.g. where the segment is much larger than the raw data or
|
||||
* many situations e.g. where the segment is much larger than the raw data or
|
||||
* where the segment is much small than each rawDataChunk returning multiple
|
||||
* segments.
|
||||
*
|
||||
@ -363,7 +517,7 @@ public class SegmenterProcess extends PamProcess {
|
||||
|
||||
/**
|
||||
* Take a raw sound chunk of data and segment into discrete groups. This handles
|
||||
* much situations e.g. where the segment is much larger than the raw data or
|
||||
* many situations e.g. where the segment is much larger than the raw data or
|
||||
* where the segment is much small than each rawDataChunk returning multiple
|
||||
* segments.
|
||||
*
|
||||
@ -595,7 +749,7 @@ public class SegmenterProcess extends PamProcess {
|
||||
//Need to copy a section of the old data into the new
|
||||
if (nextRawChunks[i]!=null) {
|
||||
/**
|
||||
* It's very important to clone this as otherwise some very weird things happnen as the units are
|
||||
* It's very important to clone this as otherwise some very weird things happen as the units are
|
||||
* passed to downstream processes.
|
||||
*/
|
||||
currentRawChunks[i] = nextRawChunks[i][nextRawChunks[i].length-1].clone(); //in an unlikely situation this could be null should be picked up by the first null check.
|
||||
@ -692,4 +846,9 @@ public class SegmenterProcess extends PamProcess {
|
||||
return segmenterDataBlock;
|
||||
}
|
||||
|
||||
|
||||
public SegmenterGroupDataBlock getSegmenteGrouprDataBlock() {
|
||||
return this.segmenterGroupDataBlock;
|
||||
}
|
||||
|
||||
}
|
src/test/rawDeepLearningClassifier/DelphinIDTest.java (new file, 113 lines)
@ -0,0 +1,113 @@
|
||||
package test.rawDeepLearningClassifier;
|
||||
|
||||
import static org.junit.jupiter.api.Assertions.assertEquals;
|
||||
|
||||
import java.awt.image.BufferedImage;
|
||||
import java.awt.image.Raster;
|
||||
import java.io.IOException;
|
||||
import java.nio.file.Path;
|
||||
import java.nio.file.Paths;
|
||||
import java.util.ArrayList;
|
||||
|
||||
import org.jamdev.jdl4pam.transforms.DLTransform;
|
||||
import org.jamdev.jdl4pam.transforms.FreqTransform;
|
||||
import org.jamdev.jdl4pam.transforms.DLTransform.DLTransformType;
|
||||
import org.jamdev.jdl4pam.utils.DLMatFile;
|
||||
import org.jamdev.jdl4pam.utils.DLUtils;
|
||||
import org.junit.jupiter.api.Test;
|
||||
|
||||
import rawDeepLearningClassifier.dlClassification.delphinID.Whistles2Image;
|
||||
import us.hebi.matlab.mat.format.Mat5;
|
||||
import us.hebi.matlab.mat.types.MatFile;
|
||||
import us.hebi.matlab.mat.types.Matrix;
|
||||
|
||||
public class DelphinIDTest {
|
||||
|
||||
|
||||
@Test
|
||||
public void whistle2ImageTest() {
|
||||
|
||||
System.out.println("Whislte2Image test started");
|
||||
|
||||
/**
|
||||
* Test whether the Whistles2Image transform works properly
|
||||
*/
|
||||
String relMatPath = "./src/test/resources/rawDeepLearningClassifier/DelphinID/whistle_image_example.mat";
|
||||
|
||||
Path path = Paths.get(relMatPath);
|
||||
|
||||
// Read the MAT file containing the whistle contour test data
|
||||
try {
|
||||
MatFile matFile = Mat5.readFromFile(path.toString());
|
||||
Matrix array = matFile.getArray("tfvalues");
|
||||
|
||||
//the values for the whistle detector.
|
||||
double[][] whistleValues = DLMatFile.matrix2array(array);
|
||||
|
||||
//the image after compression
|
||||
array = matFile.getArray("image1compressedgrayscale");
|
||||
double[][] compressedWhistleImage = DLMatFile.matrix2array(array);
|
||||
|
||||
//the whistle2Image transform image
|
||||
array = matFile.getArray("image1originalgrayscalenorm");
|
||||
double[][] whislteImage = DLMatFile.matrix2array(array);
|
||||
|
||||
//now perform the image transform in Java
|
||||
double[] freqLimits = new double[] {0., 20000.};
|
||||
double[] size = new double[] {680., 480.};
|
||||
|
||||
ArrayList<double[][]> whistleImageArr = new ArrayList<double[][]>();
|
||||
whistleImageArr.add(whistleValues);
|
||||
|
||||
BufferedImage canvas = Whistles2Image.makeScatterImage(whistleImageArr, size, new double[]{48, 48. + 4.}, freqLimits, 5.);
|
||||
|
||||
double[][] imaged = new double[(int) size[0]][(int) size[1]];
|
||||
|
||||
float[] color = new float[3];
|
||||
Raster raster = canvas.getData();
|
||||
for (int i=0; i<imaged.length; i++) {
|
||||
for (int j=0; j<imaged[0].length; j++) {
|
||||
color = raster.getPixel(i, j, color);
|
||||
imaged[i][j] = (255-color[0])/255.; //normalize
|
||||
}
|
||||
}
|
||||
|
||||
ArrayList<DLTransform> transforms = new ArrayList<DLTransform>();
|
||||
transforms.add(new FreqTransform(DLTransformType.SPECRESIZE, new Number[] {Integer.valueOf(64), Integer.valueOf(48)}));
|
||||
|
||||
//
|
||||
// //set the spec transform
|
||||
// ((FreqTransform) transforms.get(0)).setSpecTransfrom(whistles2Image.getSpecTransfrom());
|
||||
//
|
||||
// //process all the transforms.
|
||||
// DLTransform transform = modelTransforms.get(0);
|
||||
// for (int i =0; i<modelTransforms.size(); i++) {
|
||||
// transform = modelTransforms.get(i).transformData(transform);
|
||||
// }
|
||||
//
|
||||
// transformedData2 = ((FreqTransform) transform).getSpecTransfrom().getTransformedData();
|
||||
// transformedDataStack[j] = DLUtils.toFloatArray(transformedData2);
|
||||
//
|
||||
|
||||
|
||||
//now save this image to a MATFILE
|
||||
// Create a MAT file containing the Java-generated image array for comparison
|
||||
MatFile matFileWrite = Mat5.newMatFile()
|
||||
.addArray("image1originalgrayscalenorm",DLMatFile.array2Matrix(imaged));
|
||||
// Serialize to disk using default configurations
|
||||
Mat5.writeToFile(matFileWrite, "C:\\Users\\Jamie Macaulay\\MATLAB Drive\\MATLAB\\PAMGUARD\\deep_learning\\delphinID\\whistle_image_example_java.mat");
|
||||
|
||||
System.out.println("Whislte2Image test finished");
|
||||
|
||||
} catch (IOException e) {
|
||||
// TODO Auto-generated catch block
|
||||
e.printStackTrace();
|
||||
assertEquals(false, false);
|
||||
}
|
||||
|
||||
|
||||
|
||||
|
||||
}
|
||||
|
||||
}
|
Binary file not shown.