3QTZPFWEDJVI4FKNCJFOAEHTI5C7P2NQANRIW7D54RHXKETNEYLQC SB4FZEB6ZLUHQNM3M76OGNNJY6THOF55S6JO6Q7IGXWE7OA7INFAC GQ2FYLG2VGDDKQ7TJTCFM2XPLJOAX2N737OZWPRCNKSFIH7YJ6EQC 5KIKDA72HM6JFIPKOWGLM2EO7D5PTSK7WEVYV3YZWGMG3M34PJXQC XEFQ73KDMQJ5YP5UBGZM3Z2GZUVRCVWMTIADZQSUIJKVDTBODP4AC 3JA7HYRMHV57SIMGMGPDOMKQ3NBQS2SKOX3EKDHRBQRP7ZPZGFTQC OFL26ZLGKGJERNHPURO6CBN4FT3VN5MLASOAPXO6GJVCMSMCLPUAC TLLVARZXOP2M3B5VTLF4SYDGMIBPHABE6LJFG77IU53QTSYGTKWAC TTJZWSWFENRMFQGSEEC4DKCLPFTDWJR5XHUGKEE34HT2RDGJV52AC // Write RIFF headerif _, err := file.WriteString("RIFF"); err != nil {return err}if err := binary.Write(file, binary.LittleEndian, uint32(totalSize)); err != nil {return err}if _, err := file.WriteString("WAVE"); err != nil {return err}
// Write 44-byte WAV header in one go: build the canonical PCM WAV header
// (RIFF chunk, "fmt " subchunk, "data" subchunk header) in a single buffer
// so it can be written with one call instead of many small writes.
header := make([]byte, 44)
copy(header[0:4], "RIFF")
binary.LittleEndian.PutUint32(header[4:8], uint32(totalSize))
copy(header[8:12], "WAVE")
copy(header[12:16], "fmt ")
binary.LittleEndian.PutUint32(header[16:20], 16) // chunk size
binary.LittleEndian.PutUint16(header[20:22], 1)  // PCM format
binary.LittleEndian.PutUint16(header[22:24], uint16(channels))
binary.LittleEndian.PutUint32(header[24:28], uint32(sampleRate))
binary.LittleEndian.PutUint32(header[28:32], uint32(byteRate))
binary.LittleEndian.PutUint16(header[32:34], uint16(blockAlign))
binary.LittleEndian.PutUint16(header[34:36], uint16(bitsPerSample))
copy(header[36:40], "data")
binary.LittleEndian.PutUint32(header[40:44], uint32(dataSize))
// Write the "fmt " subchunk fields one by one via binary.Write, then the
// "data" subchunk header. Each write is error-checked individually.
// NOTE(review): binary.Write per field costs one reflective call each; the
// single-buffer variant elsewhere in this file avoids that.
if err := binary.Write(file, binary.LittleEndian, uint32(16)); err != nil { // chunk size
	return err
}
if err := binary.Write(file, binary.LittleEndian, uint16(1)); err != nil { // PCM format
	return err
}
if err := binary.Write(file, binary.LittleEndian, uint16(channels)); err != nil {
	return err
}
if err := binary.Write(file, binary.LittleEndian, uint32(sampleRate)); err != nil {
	return err
}
if err := binary.Write(file, binary.LittleEndian, uint32(byteRate)); err != nil {
	return err
}
if err := binary.Write(file, binary.LittleEndian, uint16(blockAlign)); err != nil {
	return err
}
if err := binary.Write(file, binary.LittleEndian, uint16(bitsPerSample)); err != nil {
	return err
}
// Write data chunk header
if _, err := file.WriteString("data"); err != nil {
	return err
}
if err := binary.Write(file, binary.LittleEndian, uint32(dataSize)); err != nil {
	return err
}
// Convert to 16-bit signed integer
// NOTE(review): assumes sample is in [-1.0, 1.0]; values outside that range
// overflow the int16 conversion (implementation-specific result in Go) — TODO
// confirm the caller guarantees the range or add clamping.
value := int16(sample * 32767)
if err := binary.Write(file, binary.LittleEndian, value); err != nil {
	return err
}
	// Encode each sample as little-endian int16 directly into the shared buffer,
	// then flush the whole buffer with a single Write (avoids one syscall and
	// one reflective binary.Write per sample).
	// NOTE(review): assumes sample is in [-1.0, 1.0] and buf is at least 2x the
	// sample count — TODO confirm at the (not visible) buffer allocation site.
	binary.LittleEndian.PutUint16(buf[i*2:], uint16(int16(sample*32767)))
}
if _, err := w.Write(buf); err != nil {
	return err
// Fast path: when the source is *image.RGBA, copy pixels directly through the
// Pix slices instead of going through the generic At/Set color interfaces.
// Nearest-neighbor sampling: each destination row/column maps back to a source
// row/column via the scale factors, clamped to the source bounds.
if srcRGBA, ok := img.(*image.RGBA); ok {
	result := image.NewRGBA(image.Rect(0, 0, newWidth, newHeight))
	for y := range newHeight {
		srcY := int(float64(y) * scaleY)
		if srcY >= srcHeight {
			srcY = srcHeight - 1 // clamp to last source row
		}
		// Hoist per-row offsets out of the inner loop.
		dstOff := y * result.Stride
		srcRowOff := srcY * srcRGBA.Stride
		for x := range newWidth {
			srcX := int(float64(x) * scaleX)
	// Generic path: sample the source via the image interface and convert each
	// pixel to grayscale through color.GrayModel.
	if srcY >= srcHeight {
		srcY = srcHeight - 1 // clamp to last source row
	}
	// Offset by bounds.Min because the source image's coordinate space may not
	// start at (0,0).
	c := img.At(srcX+bounds.Min.X, srcY+bounds.Min.Y)
	gray := color.GrayModel.Convert(c).(color.Gray)
	result.SetGray(x, y, gray)
			// Copy one RGBA pixel (4 bytes) straight between the Pix slices.
			// NOTE(review): srcX is not clamped in this visible fragment — presumably
			// clamped where it is computed; verify against the enclosing loop.
			si := srcRowOff + srcX*4
			di := dstOff + x*4
			result.Pix[di] = srcRGBA.Pix[si]
			result.Pix[di+1] = srcRGBA.Pix[si+1]
			result.Pix[di+2] = srcRGBA.Pix[si+2]
			result.Pix[di+3] = srcRGBA.Pix[si+3]
// cached Hann windows by size, computed once
var (
	hannCache   = map[int][]float64{}
	hannCacheMu sync.RWMutex // guards hannCache for concurrent spectrogram workers
)

func init() {
	// Pre-compute radix-2 factors for default window size to avoid lock contention under parallelism
	fft.EnsureRadix2Factors(512)
}

// getCachedHannWindow returns a cached Hann window of the given size.
// Fast path below takes only a read lock; the miss/compute path (with the
// write lock) continues beyond this visible fragment.
func getCachedHannWindow(size int) []float64 {
	hannCacheMu.RLock()
	if w, ok := hannCache[size]; ok {
		hannCacheMu.RUnlock()
		return w
	}
	hannCacheMu.RUnlock()
// Allocate power spectrum matrix (freq bins x time frames)
// NOTE(review): one allocation per row; the flat-slice variant elsewhere in
// this file does this in a single allocation.
powerMatrix := make([][]float64, numFreqBins)
for i := range powerMatrix {
	powerMatrix[i] = make([]float64, numFrames)
}
// Allocate power spectrum as flat backing slice (single allocation)
// Layout is row-major: element (bin, frame) lives at powerFlat[bin*numFrames+frame].
powerFlat := make([]float64, numFreqBins*numFrames)
// Handle zeros (replace with smallest non-zero value)
// so the subsequent log10 in the dB conversion never sees 0 (-Inf).
replaceZeros(powerMatrix)
// Convert to dB, normalize, and convert to uint8
// (normalizeToUint8 is defined elsewhere in this file).
return normalizeToUint8(powerMatrix)
// Fused normalization: replace zeros, convert to dB, find min/max, normalize to uint8
// All in 2 passes instead of 6
return normalizeFlat(powerFlat, numFreqBins, numFrames)
// replaceZeros replaces zero values with the smallest non-zero value
// found anywhere in the matrix, mutating it in place. This keeps a later
// log10 conversion finite. (The replacement pass continues past this fragment.)
func replaceZeros(matrix [][]float64) {
	// Find smallest non-zero value
	minNonZero := math.MaxFloat64
	for _, row := range matrix {
		for _, val := range row {
			if val > 0 && val < minNonZero {
				minNonZero = val
			}
		}
// normalizeFlat converts power values to dB, normalizes to 0-255, in 2 passes.
// Operates on a flat slice laid out as [row0_col0, row0_col1, ..., row1_col0, ...].
// Returns [][]uint8 with rows flipped vertically (low frequencies at bottom).
func normalizeFlat(power []float64, rows, cols int) [][]uint8 {
	// Guard: empty input yields nil rather than a zero-size allocation.
	if rows == 0 || cols == 0 {
		return nil
	// Replace zeros
	// Skip entirely when the matrix held no positive value at all (minNonZero
	// still at its MaxFloat64 sentinel).
	if minNonZero != math.MaxFloat64 {
		for i, row := range matrix {
			for j, val := range row {
				if val == 0 {
					matrix[i][j] = minNonZero
				}
			}
	// Pass 1: find minNonZero, then convert power to dB in-place, tracking min/max dB
	// minNonZero starts at the MaxFloat64 sentinel, meaning "none found yet".
	minNonZero := math.MaxFloat64
	for _, val := range power {
		if val > 0 && val < minNonZero {
			minNonZero = val
	// Convert each power value to decibels while tracking the running
	// min/max dB for the later normalization pass.
	for i, row := range powerMatrix {
		for j, power := range row {
			// Power to dB: 10 * log10(power)
			db := 10.0 * math.Log10(power)
			dbMatrix[i][j] = db
			if db < minDB {
				minDB = db
			}
			if db > maxDB {
				maxDB = db
			}
	// Substitute non-positive entries with the smallest positive value so the
	// dB conversion stays finite. (The write-back using index i continues past
	// this visible fragment.)
	for i, val := range power {
		if val <= 0 {
			val = minNonZero
	// Map each dB value linearly from [minDB, maxDB] onto [0, 255].
	for i, row := range dbMatrix {
		for j, db := range row {
			// Shift to non-negative, normalize to 0-1, then to 0-255
			normalized := (db - minDB) / rangeDB
			result[i][j] = uint8(normalized * 255.0)
	// Allocate result with flat backing slice (single allocation)
	// Each result row is a sub-slice view into resultFlat, so the whole output
	// occupies two allocations total (the flat bytes and the row headers).
	resultFlat := make([]uint8, rows*cols)
	result := make([][]uint8, rows)
	for i := range result {
		// Flip: row i in result gets data from row (rows-1-i) in power
		srcRow := rows - 1 - i
		result[i] = resultFlat[i*cols : (i+1)*cols]
		srcOff := srcRow * cols
		for j := range cols {
			// scale folds the 255/rangeDB factor computed earlier (not visible here).
			result[i][j] = uint8((power[srcOff+j] - minDB) * scale)
// Process each .data file
// Accumulate clips, output paths, and error strings into the shared output;
// a file counts as processed when it produced clips or produced no errors.
for _, dataPath := range filePaths {
	clips, errs := processFile(dataPath, input.Output, input.Prefix, input.Filter, speciesName, callType, input.Certainty, imgSize, input.Color)
	output.SegmentsClipped += len(clips)
	output.OutputFiles = append(output.OutputFiles, clips...)
	output.Errors = append(output.Errors, errs...)
	if len(clips) > 0 || len(errs) == 0 {
		output.FilesProcessed++
// Process .data files (parallel for larger batches)
if len(filePaths) <= 2 {
	// Sequential for small batches
	for _, dataPath := range filePaths {
		clips, errs := processFile(dataPath, input.Output, input.Prefix, input.Filter, speciesName, callType, input.Certainty, imgSize, input.Color)
		output.SegmentsClipped += len(clips)
		output.OutputFiles = append(output.OutputFiles, clips...)
		output.Errors = append(output.Errors, errs...)
		if len(clips) > 0 || len(errs) == 0 {
			output.FilesProcessed++
		}
	}
} else {
	// Parallel file processing
	// fileResult carries one file's clips and error strings back from a worker.
	type fileResult struct {
		clips []string
		errs  []string
	}
	// Bounded worker pool: capped at 8 (presumably I/O-bound beyond that —
	// verify against profiling) and never more workers than files.
	workers := min(runtime.NumCPU(), 8, len(filePaths))
	// Both channels are buffered to the full batch size, so producers and
	// workers never block on a full channel.
	jobs := make(chan string, len(filePaths))
	results := make(chan fileResult, len(filePaths))
	var wg sync.WaitGroup
	for range workers {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for dataPath := range jobs {
				clips, errs := processFile(dataPath, input.Output, input.Prefix, input.Filter, speciesName, callType, input.Certainty, imgSize, input.Color)
				results <- fileResult{clips: clips, errs: errs}
			}
		}()
	}
	for _, dataPath := range filePaths {
		jobs <- dataPath
	}
	close(jobs)
	// Close results once every worker has drained jobs, so the range below terminates.
	go func() {
		wg.Wait()
		close(results)
	}()
	// Aggregate results; note arrival order is nondeterministic, so
	// output.OutputFiles ordering may differ from the sequential path.
	for r := range results {
		output.SegmentsClipped += len(r.clips)
		output.OutputFiles = append(output.OutputFiles, r.clips...)
		output.Errors = append(output.Errors, r.errs...)
		if len(r.clips) > 0 || len(r.errs) == 0 {
			output.FilesProcessed++
// Process each matching segment
// On a per-segment failure, record the error and keep going with the
// remaining segments rather than aborting the file.
for _, seg := range matchingSegments {
	clipFiles, err := generateClip(samples, sampleRate, outputDir, prefix, basename, seg.StartTime, seg.EndTime, imgSize, color)
	if err != nil {
		errors = append(errors, fmt.Sprintf("%s: segment %.0f-%.0f: %v", dataPath, seg.StartTime, seg.EndTime, err))
		continue
// Process matching segments (parallel for larger batches)
// Small batches (<= 2 segments) run sequentially to avoid goroutine overhead.
if len(matchingSegments) <= 2 {
	for _, seg := range matchingSegments {
		clipFiles, err := generateClip(samples, sampleRate, outputDir, prefix, basename, seg.StartTime, seg.EndTime, imgSize, color)
		if err != nil {
			// Record the failure and continue with the remaining segments.
			errors = append(errors, fmt.Sprintf("%s: segment %.0f-%.0f: %v", dataPath, seg.StartTime, seg.EndTime, err))
			continue
		}
		clips = append(clips, clipFiles...)
	// Collect the generated clip paths for this segment.
	clips = append(clips, clipFiles...)
} else {
	// Parallel segment processing: fan the segments out to a bounded worker
	// pool; each worker reads from the shared (read-only here) samples slice.
	// segResult carries either one segment's clip paths or its error string.
	type segResult struct {
		clips []string
		err   string
	}
	workers := min(runtime.NumCPU(), len(matchingSegments))
	// Buffered to the full batch so neither senders nor workers block.
	jobs := make(chan *utils.Segment, len(matchingSegments))
	results := make(chan segResult, len(matchingSegments))
	var wg sync.WaitGroup
	for range workers {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for seg := range jobs {
				clipFiles, err := generateClip(samples, sampleRate, outputDir, prefix, basename, seg.StartTime, seg.EndTime, imgSize, color)
				if err != nil {
					results <- segResult{err: fmt.Sprintf("%s: segment %.0f-%.0f: %v", dataPath, seg.StartTime, seg.EndTime, err)}
				} else {
					results <- segResult{clips: clipFiles}
				}
			}
		}()
	}
	for _, seg := range matchingSegments {
		jobs <- seg
	}
	close(jobs)
	// Close results after all workers finish so the range below terminates.
	go func() {
		wg.Wait()
		close(results)
	}()
	// Aggregate; arrival order is nondeterministic, so clip ordering may
	// differ from the sequential path.
	for r := range results {
		if r.err != "" {
			errors = append(errors, r.err)
		} else {
			clips = append(clips, r.clips...)
		}
	}