T2WZBTVFHVWPKL6AKEWSEVQBR3HWWWUPUNUP2MULF4WXEAZP46KQC err := filepath.Walk(rootPath, func(path string, info os.FileInfo, err error) error {if err != nil {return err}
if err := filepath.Walk(rootPath, makeWalkFunc(&results, opts, rootPath)); err != nil {return nil, err}} else {if err := scanFlatDir(rootPath, &results, opts); err != nil {return nil, err}}
// Skip hidden files/directoriesif opts.SkipHidden && strings.HasPrefix(name, ".") && path != rootPath {if info.IsDir() {return filepath.SkipDir}return nil}
// makeWalkFunc returns a filepath.WalkFunc that collects matching files.func makeWalkFunc(results *[]string, opts FindFilesOptions, rootPath string) filepath.WalkFunc {return func(path string, info os.FileInfo, err error) error {if err != nil {return err}name := info.Name()
// Check directory skip prefixesif info.IsDir() && path != rootPath {for _, prefix := range opts.SkipPrefixes {if strings.HasPrefix(name, prefix) {return filepath.SkipDir}}return nil
// Skip hidden files/directoriesif opts.SkipHidden && strings.HasPrefix(name, ".") && path != rootPath {if info.IsDir() {return filepath.SkipDir
// Check fileif !info.IsDir() {if strings.ToLower(filepath.Ext(name)) == extTarget && info.Size() >= opts.MinSize {results = append(results, path)
// Check directory skip prefixesif info.IsDir() && path != rootPath {for _, prefix := range opts.SkipPrefixes {if strings.HasPrefix(name, prefix) {return filepath.SkipDir
if strings.ToLower(filepath.Ext(name)) == extTarget {path := filepath.Join(rootPath, name)if info, err := os.Stat(path); err == nil && info.Size() >= opts.MinSize {results = append(results, path)}}}
// fileMatches checks if a file name meets the extension and size criteria.func fileMatches(name string, info os.FileInfo, opts FindFilesOptions) bool {if !strings.EqualFold(filepath.Ext(name), opts.Extension) {return false}if info.Size() < opts.MinSize {return false
sort.Strings(results)return results, nil
// scanFlatDir scans a single directory level for matching files.func scanFlatDir(rootPath string, results *[]string, opts FindFilesOptions) error {entries, err := os.ReadDir(rootPath)if err != nil {return err}for _, entry := range entries {if entry.IsDir() {continue}name := entry.Name()if opts.SkipHidden && strings.HasPrefix(name, ".") {continue}if !strings.EqualFold(filepath.Ext(name), opts.Extension) {continue}path := filepath.Join(rootPath, name)if info, err := os.Stat(path); err == nil && info.Size() >= opts.MinSize {*results = append(*results, path)}}return nil
// Validate query starts with SELECT or WITHif !selectPattern.MatchString(input.Query) {return ExecuteSQLOutput{}, fmt.Errorf("only SELECT and WITH queries are allowed")}
limit := resolveLimit(input.Limit)query, autoAddedLimit := applyLimit(input.Query, limit)
// Check for forbidden keywords (defense in depth - database is already read-only)if forbiddenPattern.MatchString(input.Query) {return ExecuteSQLOutput{}, fmt.Errorf("query contains forbidden keywords (INSERT/UPDATE/DELETE/DROP/CREATE/ALTER)")
database, err := db.OpenReadOnlyDB(dbPath)if err != nil {return ExecuteSQLOutput{}, fmt.Errorf("database connection failed: %w", err)
// Determine row limitlimit := defaultLimitif input.Limit != nil {if *input.Limit < 1 || *input.Limit > maxLimit {return ExecuteSQLOutput{}, fmt.Errorf("limit must be between 1 and %d", maxLimit)}limit = *input.Limit
rows, err := executeSQLQuery(ctx, database, query, input.Parameters)if err != nil {return ExecuteSQLOutput{}, err
// Add LIMIT clause if not present// Query for limit+1 rows to detect truncationquery := input.QueryautoAddedLimit := falseif !limitPattern.MatchString(query) {query = fmt.Sprintf("%s LIMIT %d", strings.TrimSpace(query), limit+1)autoAddedLimit = true
columnInfo, columns, err := buildColumnInfo(rows)if err != nil {return ExecuteSQLOutput{}, err
// Execute query with parametersvar rows *sql.Rowsif len(input.Parameters) > 0 {rows, err = database.QueryContext(ctx, query, input.Parameters...)} else {rows, err = database.QueryContext(ctx, query)
// Handle empty results (return empty array, not error)if results == nil {results = []map[string]any{}
if err != nil {return ExecuteSQLOutput{}, fmt.Errorf("query execution failed: %w", err)
// Detect truncation: if we auto-added limit+1 and got more than limit rowslimited := falseif autoAddedLimit && len(results) > limit {limited = trueresults = results[:limit]
// Get column metadata
// validateSQLQuery checks the query is a safe SELECT/WITH statement.func validateSQLQuery(query string, limit *int) error {if strings.TrimSpace(query) == "" {return fmt.Errorf("query cannot be empty")}if !selectPattern.MatchString(query) {return fmt.Errorf("only SELECT and WITH queries are allowed")}if forbiddenPattern.MatchString(query) {return fmt.Errorf("query contains forbidden keywords (INSERT/UPDATE/DELETE/DROP/CREATE/ALTER)")}if limit != nil {if *limit < 1 || *limit > maxLimit {return fmt.Errorf("limit must be between 1 and %d", maxLimit)}}return nil}// resolveLimit returns the effective row limit from input or default.func resolveLimit(limit *int) int {if limit != nil {return *limit}return defaultLimit}// applyLimit appends a LIMIT clause if not already present.// Returns the modified query and whether a limit was auto-added.func applyLimit(query string, limit int) (string, bool) {if !limitPattern.MatchString(query) {return fmt.Sprintf("%s LIMIT %d", strings.TrimSpace(query), limit+1), true}return query, false}// executeSQLQuery runs the query and returns the result rows.func executeSQLQuery(ctx context.Context, database *sql.DB, query string, params []any) (*sql.Rows, error) {if len(params) > 0 {return database.QueryContext(ctx, query, params...)}return database.QueryContext(ctx, query)}// buildColumnInfo extracts column metadata from the result set.func buildColumnInfo(rows *sql.Rows) ([]ColumnInfo, []string, error) {
// Handle empty results (return empty array, not error)if results == nil {results = []map[string]any{}}// Detect truncation: if we auto-added limit+1 and got more than limit rowslimited := falseif autoAddedLimit && len(results) > limit {limited = trueresults = results[:limit]}// Build the query string to report (show effective limit, not internal limit+1)queryReported := query
// buildQueryReported constructs the query string to report in output.func buildQueryReported(originalQuery string, autoAddedLimit bool, limit int) string {
queryReported = fmt.Sprintf("%s LIMIT %d", strings.TrimSpace(input.Query), limit)}// Create output structureoutput := ExecuteSQLOutput{Rows: results,RowCount: len(results),Columns: columnInfo,Limited: limited,Query: queryReported,
return fmt.Sprintf("%s LIMIT %d", strings.TrimSpace(originalQuery), limit)
if err := validateLocationFields(input); err != nil {
// fetchLocationByID scans a full location row from a query that selects// id, dataset_id, name, latitude, longitude, description, created_at, last_modified, active, timezone_id.func fetchLocationByID(ctx context.Context, queryer interface {QueryRowContext(context.Context, string, ...any) *sql.Row}, id string) (db.Location, error) {const selectCols = "SELECT id, dataset_id, name, latitude, longitude, description, created_at, last_modified, active, timezone_id FROM location WHERE id = ?"var loc db.Locationerr := queryer.QueryRowContext(ctx, selectCols, id).Scan(&loc.ID, &loc.DatasetID, &loc.Name, &loc.Latitude, &loc.Longitude,&loc.Description, &loc.CreatedAt, &loc.LastModified, &loc.Active, &loc.TimezoneID)return loc, err}func createLocation(ctx context.Context, input LocationInput) (LocationOutput, error) {var output LocationOutputif err := validateCreateFields(input); err != nil {
// Verify dataset exists and is activevar datasetExists, datasetActive boolerr = tx.QueryRowContext(ctx,"SELECT EXISTS(SELECT 1 FROM dataset WHERE id = ?), COALESCE((SELECT active FROM dataset WHERE id = ?), false)",*input.DatasetID, *input.DatasetID,).Scan(&datasetExists, &datasetActive)if err != nil {return output, fmt.Errorf("failed to verify dataset: %w", err)}if !datasetExists {return output, fmt.Errorf("dataset with ID '%s' does not exist", *input.DatasetID)
if err := verifyDatasetExistsAndActive(ctx, tx, *input.DatasetID); err != nil {return output, err
// Location with this name already exists in dataset - return existing (consistent duplicate handling)var location db.Locationerr = tx.QueryRowContext(ctx,"SELECT id, dataset_id, name, latitude, longitude, description, created_at, last_modified, active, timezone_id FROM location WHERE id = ?",existingID,).Scan(&location.ID, &location.DatasetID, &location.Name, &location.Latitude, &location.Longitude,&location.Description, &location.CreatedAt, &location.LastModified, &location.Active, &location.TimezoneID)if err != nil {return output, fmt.Errorf("failed to fetch existing location: %w", err)}if err = tx.Commit(); err != nil {return output, fmt.Errorf("failed to commit transaction: %w", err)}output.Location = locationoutput.Message = fmt.Sprintf("Location '%s' already exists in dataset (ID: %s) - returning existing location", location.Name, location.ID)return output, nil
return returnExistingLocation(ctx, tx, existingID, output)
var location db.Locationerr = tx.QueryRowContext(ctx,"SELECT id, dataset_id, name, latitude, longitude, description, created_at, last_modified, active, timezone_id FROM location WHERE id = ?",id,).Scan(&location.ID, &location.DatasetID, &location.Name, &location.Latitude, &location.Longitude,&location.Description, &location.CreatedAt, &location.LastModified, &location.Active, &location.TimezoneID)
location, err := fetchLocationByID(ctx, tx, id)
// returnExistingLocation handles the case where a location already exists in the dataset.func returnExistingLocation(ctx context.Context, tx *db.LoggedTx, existingID string, output LocationOutput) (LocationOutput, error) {location, err := fetchLocationByID(ctx, tx, existingID)if err != nil {return output, fmt.Errorf("failed to fetch existing location: %w", err)}if err = tx.Commit(); err != nil {return output, fmt.Errorf("failed to commit transaction: %w", err)}output.Location = locationoutput.Message = fmt.Sprintf("Location '%s' already exists in dataset (ID: %s) - returning existing location", location.Name, location.ID)
// verifyDatasetExistsAndActive checks that a dataset exists and is active.
// It returns a descriptive error when the dataset is missing or inactive,
// and wraps any underlying query error.
func verifyDatasetExistsAndActive(ctx context.Context, queryer interface {
	QueryRowContext(context.Context, string, ...any) *sql.Row
}, datasetID string) error {
	var exists, active bool
	row := queryer.QueryRowContext(ctx,
		"SELECT EXISTS(SELECT 1 FROM dataset WHERE id = ?), COALESCE((SELECT active FROM dataset WHERE id = ?), false)",
		datasetID, datasetID,
	)
	if err := row.Scan(&exists, &active); err != nil {
		return fmt.Errorf("failed to verify dataset: %w", err)
	}
	switch {
	case !exists:
		return fmt.Errorf("dataset with ID '%s' does not exist", datasetID)
	case !active:
		return fmt.Errorf("dataset (ID: %s) is not active", datasetID)
	}
	return nil
}
// Verify location exists and check active statusvar exists, active boolvar currentDatasetID stringerr = database.QueryRow("SELECT EXISTS(SELECT 1 FROM location WHERE id = ?), COALESCE((SELECT active FROM location WHERE id = ?), false), COALESCE((SELECT dataset_id FROM location WHERE id = ?), '')",locationID, locationID, locationID,).Scan(&exists, &active, ¤tDatasetID)if err != nil {return output, fmt.Errorf("failed to query location: %w", err)}if !exists {return output, fmt.Errorf("location not found: %s", locationID)}if !active {return output, fmt.Errorf("location '%s' is not active (cannot update inactive locations)", locationID)
if err := verifyLocationExistsAndActive(database, locationID); err != nil {return output, err
var datasetExists, datasetActive boolerr = database.QueryRow("SELECT EXISTS(SELECT 1 FROM dataset WHERE id = ?), COALESCE((SELECT active FROM dataset WHERE id = ?), false)",*input.DatasetID, *input.DatasetID,).Scan(&datasetExists, &datasetActive)
if err := verifyDatasetExistsAndActive(context.Background(), database, *input.DatasetID); err != nil {return output, err}}updates, args, err := buildLocationUpdates(input, locationID)if err != nil {return output, err}query := fmt.Sprintf("UPDATE location SET %s WHERE id = ?", strings.Join(updates, ", "))// Begin logged transaction for updatetx, err := db.BeginLoggedTx(ctx, database, "create_or_update_location")if err != nil {return output, fmt.Errorf("failed to begin transaction: %w", err)}defer func() {
if !datasetExists {return output, fmt.Errorf("dataset not found: %s", *input.DatasetID)}if !datasetActive {return output, fmt.Errorf("dataset '%s' is not active", *input.DatasetID)}
}()_, err = tx.ExecContext(ctx, query, args...)if err != nil {return output, fmt.Errorf("failed to update location: %w", err)}// Fetch the updated locationlocation, err := fetchLocationByID(ctx, tx, locationID)if err != nil {return output, fmt.Errorf("failed to fetch updated location: %w", err)}if err = tx.Commit(); err != nil {return output, fmt.Errorf("failed to commit transaction: %w", err)}output.Location = locationoutput.Message = fmt.Sprintf("Successfully updated location '%s' (ID: %s)", location.Name, location.ID)return output, nil}// verifyLocationExistsAndActive checks that a location exists and is active.func verifyLocationExistsAndActive(queryer interface {QueryRow(string, ...any) *sql.Row}, locationID string) error {var exists, active boolerr := queryer.QueryRow("SELECT EXISTS(SELECT 1 FROM location WHERE id = ?), COALESCE((SELECT active FROM location WHERE id = ?), false)",locationID, locationID,).Scan(&exists, &active)if err != nil {return fmt.Errorf("failed to query location: %w", err)}if !exists {return fmt.Errorf("location not found: %s", locationID)}if !active {return fmt.Errorf("location '%s' is not active (cannot update inactive locations)", locationID)
query := fmt.Sprintf("UPDATE location SET %s WHERE id = ?", strings.Join(updates, ", "))// Begin logged transaction for updatetx, err := db.BeginLoggedTx(ctx, database, "create_or_update_location")if err != nil {return output, fmt.Errorf("failed to begin transaction: %w", err)}defer func() {if err != nil {tx.Rollback()}}()_, err = tx.ExecContext(ctx, query, args...)if err != nil {return output, fmt.Errorf("failed to update location: %w", err)}// Fetch the updated locationvar location db.Locationerr = tx.QueryRow("SELECT id, dataset_id, name, latitude, longitude, description, created_at, last_modified, active, timezone_id FROM location WHERE id = ?",locationID,).Scan(&location.ID, &location.DatasetID, &location.Name, &location.Latitude, &location.Longitude,&location.Description, &location.CreatedAt, &location.LastModified, &location.Active, &location.TimezoneID)if err != nil {return output, fmt.Errorf("failed to fetch updated location: %w", err)}if err = tx.Commit(); err != nil {return output, fmt.Errorf("failed to commit transaction: %w", err)}output.Location = locationoutput.Message = fmt.Sprintf("Successfully updated location '%s' (ID: %s)", location.Name, location.ID)return output, nil
return updates, args, nil
// Verify dataset exists and get name/typevar datasetName, datasetType stringerr = sourceDB.QueryRowContext(ctx,"SELECT name, type FROM dataset WHERE id = ? AND active = true",input.DatasetID,).Scan(&datasetName, &datasetType)
datasetName, err := verifyExportDataset(ctx, sourceDB, input)
return output, fmt.Errorf("cannot export dataset of type '%s': only structured datasets are supported", datasetType)}// Check if output file existsif !input.DryRun {if _, err := os.Stat(input.Output); err == nil && !input.Force {sourceDB.Close()return output, fmt.Errorf("output file exists: %s (use --force to overwrite)", input.Output)}
return output, err
// Calculate row counts for each tablefor _, tr := range orderedTables {count, err := countTableRows(ctx, sourceDB, tr, input.DatasetID)if err != nil {sourceDB.Close()return output, fmt.Errorf("failed to count rows in %s: %w", tr.Table, err)}if count > 0 {output.RowCounts[tr.Table] = count}
if err := countAllTableRows(ctx, sourceDB, orderedTables, input.DatasetID, &output); err != nil {sourceDB.Close()return output, err
// Create output directory if neededoutputDir := filepath.Dir(input.Output)if outputDir != "" && outputDir != "." {if err := os.MkdirAll(outputDir, 0755); err != nil {return output, fmt.Errorf("failed to create output directory: %w", err)}
if err := createOutputDir(input.Output); err != nil {return output, err
// Copy data in FK orderfor _, tr := range orderedTables {if tr.Relation == "copy" {// Copy entire table as-iserr = copyTableAsIs(ctx, outputDB, tr.Table)} else {// Owned or owned-via: filter by dataseterr = copyTableData(ctx, outputDB, tr, input.DatasetID)}if err != nil {return output, fmt.Errorf("failed to copy %s: %w", tr.Table, err)}
if err := copyDataToOutput(ctx, outputDB, orderedTables, input.DatasetID); err != nil {return output, err
// checkOutputFile returns an error if the output file already exists and force is not set.func checkOutputFile(input ExportDatasetInput) error {if input.DryRun {return nil}if _, err := os.Stat(input.Output); err == nil && !input.Force {return fmt.Errorf("output file exists: %s (use --force to overwrite)", input.Output)}return nil}// verifyExportDataset checks the dataset exists, is active, and is structured.func verifyExportDataset(ctx context.Context, sourceDB *sql.DB, input ExportDatasetInput) (string, error) {var datasetName, datasetType stringerr := sourceDB.QueryRowContext(ctx,"SELECT name, type FROM dataset WHERE id = ? AND active = true",input.DatasetID,).Scan(&datasetName, &datasetType)if err != nil {return "", fmt.Errorf("dataset not found: %s", input.DatasetID)}if datasetType != "structured" {return "", fmt.Errorf("cannot export dataset of type '%s': only structured datasets are supported", datasetType)}return datasetName, nil}// getOrderedTableManifest returns the dataset tables sorted by FK dependency.func getOrderedTableManifest(sourceDB *sql.DB) ([]TableRelationship, error) {fkOrder, err := db.GetFKOrder(sourceDB)if err != nil {return nil, fmt.Errorf("failed to compute table order: %w", err)}return orderByFKDependency(datasetTables, fkOrder), nil}// countAllTableRows populates output.RowCounts for all tables in the manifest.func countAllTableRows(ctx context.Context, sourceDB *sql.DB, tables []TableRelationship, datasetID string, output *ExportDatasetOutput) error {for _, tr := range tables {count, err := countTableRows(ctx, sourceDB, tr, datasetID)if err != nil {return fmt.Errorf("failed to count rows in %s: %w", tr.Table, err)}if count > 0 {output.RowCounts[tr.Table] = count}}return nil}// createOutputDir creates the output directory if needed.func createOutputDir(outputPath string) error {outputDir := filepath.Dir(outputPath)if outputDir != "" && outputDir != "." 
{if err := os.MkdirAll(outputDir, 0755); err != nil {return fmt.Errorf("failed to create output directory: %w", err)}}return nil}// copyDataToOutput attaches the source DB and copies all tables in FK order.func copyDataToOutput(ctx context.Context, outputDB *sql.DB, tables []TableRelationship, datasetID string) error {_, err := outputDB.ExecContext(ctx, fmt.Sprintf("ATTACH '%s' AS source", dbPath))if err != nil {return fmt.Errorf("failed to attach source database: %w", err)}for _, tr := range tables {if tr.Relation == "copy" {err = copyTableAsIs(ctx, outputDB, tr.Table)} else {err = copyTableData(ctx, outputDB, tr, datasetID)}if err != nil {return fmt.Errorf("failed to copy %s: %w", tr.Table, err)}}return nil}
// Collect ALL labeled segments per model — no scope filtering here.// Scope is applied to anchor selection only, so a "Don't Know" label in model[1]// against a "Kiwi" anchor in model[0] is correctly surfaced as a label_mismatch.modelSegs := make(map[string][]labeledSeg, len(models))for _, seg := range df.Segments {for _, lbl := range seg.Labels {for _, model := range models {if lbl.Filter == model {modelSegs[model] = append(modelSegs[model], labeledSeg{seg: seg, label: lbl})break}}}}
modelSegs := collectModelSegments(df, models)
if len(scope) > 0 {key := anchor.label.Speciesif anchor.label.CallType != "" {key += "+" + anchor.label.CallType}if !scope[key] && !scope[anchor.label.Species] {continue
if !inScope(anchor, scope) {continue}if matches := findOverlappingMatches(anchor, models, modelSegs); matches == nil {continue} else {group := buildComparisonGroup(anchor, models, matches)if a := checkGroupAnomaly(group, path, models); a != nil {anomalies = append(anomalies, *a)
// Find overlapping segments in every other model.matches := make(map[string][]labeledSeg, len(models)-1)lonely := falsefor _, model := range models[1:] {for _, candidate := range modelSegs[model] {if overlaps(anchor.seg, candidate.seg) {matches[model] = append(matches[model], candidate)
// collectModelSegments groups labeled segments by model filter name.func collectModelSegments(df *utils.DataFile, models []string) map[string][]labeledSeg {modelSegs := make(map[string][]labeledSeg, len(models))for _, seg := range df.Segments {for _, lbl := range seg.Labels {for _, model := range models {if lbl.Filter == model {modelSegs[model] = append(modelSegs[model], labeledSeg{seg: seg, label: lbl})break
// Build comparison group: anchor + first overlapping match per other model// (consistent with propagate's approach).group := []labeledSeg{anchor}for _, model := range models[1:] {group = append(group, matches[model][0])}
// inScope returns true if the anchor's label is within the species scope filter.func inScope(anchor labeledSeg, scope map[string]bool) bool {if len(scope) == 0 {return true}key := anchor.label.Speciesif anchor.label.CallType != "" {key += "+" + anchor.label.CallType}return scope[key] || scope[anchor.label.Species]}
// Check species+calltype agreement.refSpecies := group[0].label.SpeciesrefCallType := group[0].label.CallTypelabelMatch := truefor _, ls := range group[1:] {if ls.label.Species != refSpecies || ls.label.CallType != refCallType {labelMatch = falsebreak
// findOverlappingMatches returns matches[model] = overlapping segments from that model,// or nil if any model has no overlap (lonely anchor).func findOverlappingMatches(anchor labeledSeg, models []string, modelSegs map[string][]labeledSeg) map[string][]labeledSeg {matches := make(map[string][]labeledSeg, len(models)-1)for _, model := range models[1:] {for _, candidate := range modelSegs[model] {if overlaps(anchor.seg, candidate.seg) {matches[model] = append(matches[model], candidate)
if !labelMatch {anomalies = append(anomalies, Anomaly{File: path, Type: "label_mismatch", Segments: buildAnomalySegs(group, models)})continue}
// buildComparisonGroup assembles anchor + first match per other model.func buildComparisonGroup(anchor labeledSeg, models []string, matches map[string][]labeledSeg) []labeledSeg {group := []labeledSeg{anchor}for _, model := range models[1:] {group = append(group, matches[model][0])}return group}
// Labels agree — check certainty.refCertainty := group[0].label.Certaintyfor _, ls := range group[1:] {if ls.label.Certainty != refCertainty {anomalies = append(anomalies, Anomaly{File: path, Type: "certainty_mismatch", Segments: buildAnomalySegs(group, models)})break}
// checkGroupAnomaly checks a comparison group for label or certainty mismatches.func checkGroupAnomaly(group []labeledSeg, path string, models []string) *Anomaly {refSpecies := group[0].label.SpeciesrefCallType := group[0].label.CallTypefor _, ls := range group[1:] {if ls.label.Species != refSpecies || ls.label.CallType != refCallType {a := Anomaly{File: path, Type: "label_mismatch", Segments: buildAnomalySegs(group, models)}return &a
return anomalies
refCertainty := group[0].label.Certaintyfor _, ls := range group[1:] {if ls.label.Certainty != refCertainty {a := Anomaly{File: path, Type: "certainty_mismatch", Segments: buildAnomalySegs(group, models)}return &a}}return nil
// Resolve segments against the mapping. Skip:// - filter mismatch (when --filter set)// - annotation duration < min_label_overlap// - species not in mappingsegs := make([]resolvedSeg, 0, len(df.Segments))for _, seg := range df.Segments {if seg.EndTime-seg.StartTime < input.MinLabelOverlap {
// resolveSegments maps segments to their classification and filters out mismatches.func resolveSegments(segments []*utils.Segment,filter string,minLabelOverlap float64,mapping utils.MappingFile,classIdx map[string]int,out *CallsClipLabelsOutput,) []resolvedSeg {segs := make([]resolvedSeg, 0, len(segments))for _, seg := range segments {if seg.EndTime-seg.StartTime < minLabelOverlap {
// Compute relative path for the WAV file.wavName := strings.TrimSuffix(filepath.Base(path), ".data")
// computeWavRelPath computes the relative path from cwd to the WAV file corresponding to a .data file.func computeWavRelPath(dataPath, cwd, folderAbs string) (string, error) {wavName := strings.TrimSuffix(filepath.Base(dataPath), ".data")
// Label each clip window.
// labelClipWindows classifies each clip window and builds the output rows.func labelClipWindows(windows []utils.ClipWindow, segs []resolvedSeg, rel string, classes []string, minLabelOverlap float64, out *CallsClipLabelsOutput) []clipLabelsRow {