Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
42 changes: 42 additions & 0 deletions fix_sft_audio_mode_check.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,42 @@
"""One-off codemod for tasks/sft/src/index.ts.

Rewrites the onStimulusPhaseStart handler so its early-return also checks
`config.audio.mode !== "audiovisual"`, i.e. the tone is only scheduled when
the task is actually configured for audiovisual presentation.
"""
import re

FILE_PATH = "tasks/sft/src/index.ts"

# Replacement text: the handler as it should read, with the mode guard added.
ON_STIM_START = """ postResponseContent: config.rtTask.enabled ? config.rtTask.postResponseContent : "stimulus",
 onStimulusPhaseStart: () => {
 const trial = trialProvider();
 if (!trial || !audioService || config.audio.mode !== "audiovisual") return;
 let targetSalience = 0;
 const code = trial.stimCode;
 if (code.length === 2) {
 const audioChar = code[1]; // Assume channel 2 is audio
 if (audioChar === 'H') targetSalience = trial.salience.high;
 else if (audioChar === 'L') targetSalience = trial.salience.low;
 else if (audioChar === 'x') targetSalience = 0;
 else targetSalience = trial.salience.high; // fallback
 } else {
 targetSalience = trial.salience.high;
 }

 if (targetSalience > 0) {
 const baseVol = config.audio.volume;
 const vol = baseVol * targetSalience;
 audioService.playTone(config.audio.frequencyHz, config.audio.durationMs, {
 waveform: config.audio.waveform,
 volume: vol
 });
 }
 },
 onResponse: (response: { key: string | null; rtMs: number | null }, data: Record<string, unknown>) => {"""

# Escaped literal of the handler as it currently exists (no mode guard).
TARGET_PATTERN = r' postResponseContent: config\.rtTask\.enabled \? config\.rtTask\.postResponseContent : "stimulus",\n onStimulusPhaseStart: \(\) => \{\n const trial = trialProvider\(\);\n if \(!trial \|\| !audioService\) return;\n let targetSalience = 0;\n const code = trial\.stimCode;\n if \(code\.length === 2\) \{\n const audioChar = code\[1\]; // Assume channel 2 is audio\n if \(audioChar === \'H\'\) targetSalience = trial\.salience\.high;\n else if \(audioChar === \'L\'\) targetSalience = trial\.salience\.low;\n else if \(audioChar === \'x\'\) targetSalience = 0;\n else targetSalience = trial\.salience\.high; // fallback\n \} else \{\n targetSalience = trial\.salience\.high;\n \}\n \n if \(targetSalience > 0\) \{\n const baseVol = config\.audio\.volume;\n const vol = baseVol \* targetSalience;\n audioService\.playTone\(config\.audio\.frequencyHz, config\.audio\.durationMs, \{\n waveform: config\.audio\.waveform,\n volume: vol\n \}\);\n \}\n \},\n onResponse: \(response: \{ key: string \| null; rtMs: number \| null \}, data: Record<string, unknown>\) => \{'


def apply_fix(content: str) -> str:
    """Return *content* with the mode guard added to onStimulusPhaseStart.

    Raises ValueError when the expected handler text is not found, instead of
    silently writing the file back unchanged (the original script printed its
    success message even when the pattern never matched).
    """
    new_content, count = re.subn(TARGET_PATTERN, ON_STIM_START, content)
    if count == 0:
        raise ValueError("onStimulusPhaseStart block not found in " + FILE_PATH)
    return new_content


def main() -> None:
    """Read the target file, apply the fix, and write it back."""
    with open(FILE_PATH, "r") as f:
        content = f.read()
    with open(FILE_PATH, "w") as f:
        f.write(apply_fix(content))
    print("Fixed audio mode check")


if __name__ == "__main__":
    main()
41 changes: 41 additions & 0 deletions fix_sft_config.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,41 @@
"""One-off codemod for tasks/sft/src/index.ts (second attempt).

Reverts a previously mis-placed `audio` insertion, then re-inserts the audio
block into the SftParsedConfig interface and the audio-parsing code after the
`legacyTiming` object inside parseSftConfig.
"""
import re

FILE_PATH = "tasks/sft/src/index.ts"

# Revert step 1: remove a previously inserted interface block, if present.
# Zero matches is expected on a clean file, so this step is non-fatal.
REVERT_INTERFACE_PATTERN = r' audio: \{\n waveform: OscillatorType;\n frequencyHz: number;\n durationMs: number;\n volume: number;\n \};\n'

# Revert step 2: remove previously inserted parsing code, if present.
REVERT_PARSING_PATTERN = r'\n const stimulusAudioRaw = asObject\(stimulusRaw\?\.audio\);\n const audio = \{\n waveform: \(asString\(stimulusAudioRaw\?\.waveform\) \|\| "sine"\) as OscillatorType,\n frequencyHz: toPositiveNumber\(stimulusAudioRaw\?\.frequencyHz \?\? stimulusAudioRaw\?\.frequency_hz, 440\),\n durationMs: toPositiveNumber\(stimulusAudioRaw\?\.durationMs \?\? stimulusAudioRaw\?\.duration_ms, legacyTiming\.stimulusMs\),\n volume: toUnitNumber\(stimulusAudioRaw\?\.volume, 0\.25\),\n \};\n'

# Insert step 1: audio member appended after the display member of the
# SftParsedConfig interface.
AUDIO_INTERFACE = """
 audio: {
 waveform: OscillatorType;
 frequencyHz: number;
 durationMs: number;
 volume: number;
 };"""
INTERFACE_PATTERN = r'(interface SftParsedConfig \{[^}]+\n display: \{[^}]+\};)'

# Insert step 2: parsing code appended after the legacyTiming object, so the
# `legacyTiming.stimulusMs` default is read after its declaration.
AUDIO_PARSING = """
 const stimulusAudioRaw = asObject(stimulusRaw?.audio);
 const audio = {
 waveform: (asString(stimulusAudioRaw?.waveform) || "sine") as OscillatorType,
 frequencyHz: toPositiveNumber(stimulusAudioRaw?.frequencyHz ?? stimulusAudioRaw?.frequency_hz, 440),
 durationMs: toPositiveNumber(stimulusAudioRaw?.durationMs ?? stimulusAudioRaw?.duration_ms, legacyTiming.stimulusMs),
 volume: toUnitNumber(stimulusAudioRaw?.volume, 0.25),
 };
"""
LEGACY_TIMING_PATTERN = r'(const legacyTiming = \{[^}]+\};)'


def apply_fix(content: str) -> str:
    """Return *content* with the audio interface member and parsing inserted.

    The revert steps are allowed to match nothing; the insert steps print a
    warning when their pattern fails to match, instead of silently writing the
    file back unchanged like the original script did.
    """
    content = re.sub(REVERT_INTERFACE_PATTERN, '', content)
    content = re.sub(REVERT_PARSING_PATTERN, '', content)
    content, count = re.subn(INTERFACE_PATTERN, r'\1' + AUDIO_INTERFACE, content)
    if count == 0:
        print("WARNING: SftParsedConfig interface pattern did not match")
    content, count = re.subn(LEGACY_TIMING_PATTERN, r'\1' + AUDIO_PARSING, content)
    if count == 0:
        print("WARNING: legacyTiming pattern did not match")
    return content


def main() -> None:
    """Read the target file, apply the fix, and write it back."""
    with open(FILE_PATH, "r") as f:
        content = f.read()
    with open(FILE_PATH, "w") as f:
        f.write(apply_fix(content))
    print("Fixed SFT Config")


if __name__ == "__main__":
    main()
31 changes: 31 additions & 0 deletions fix_sft_config2.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,31 @@
"""One-off codemod for tasks/sft/src/index.ts (third attempt).

Adds the `audio` member to the SftParsedConfig interface by line scanning,
instead of the brittle multi-line regexes used by the earlier
fix_sft_config.py attempts.
"""

FILE_PATH = "tasks/sft/src/index.ts"

# Interface members to insert immediately above the `display:` member.
# Stored as a single multi-line element; it is flattened when the lines are
# re-joined with "\n".
AUDIO_MEMBER = " audio: {\n waveform: OscillatorType;\n frequencyHz: number;\n durationMs: number;\n volume: number;\n };"


def insert_audio(content: str) -> str:
    """Return *content* with the audio member inserted into SftParsedConfig.

    Finds the first ` display: {` line preceded (within 20 lines) by
    `SftParsedConfig` and inserts the audio member above it. Idempotent: the
    duplicate check scans BOTH directions around the display line — the
    original script only scanned forward, but the member is inserted *above*
    `display:`, so a second run would have inserted a duplicate block.
    """
    lines = content.split("\n")
    for i, line in enumerate(lines):
        preceding = "\n".join(lines[max(0, i - 20):i])
        if " display: {" in line and "SftParsedConfig" in preceding:
            window = "\n".join(lines[max(0, i - 20):i + 20])
            if "audio: {" not in window:
                lines.insert(i, AUDIO_MEMBER)
            break
    return "\n".join(lines)


def main() -> None:
    """Read the target file, insert the member, and write it back."""
    with open(FILE_PATH, "r") as f:
        content = f.read()
    with open(FILE_PATH, "w") as f:
        f.write(insert_audio(content))
    print("Fixed SFT Config 2")


if __name__ == "__main__":
    main()
50 changes: 50 additions & 0 deletions fix_sft_timeline.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,50 @@
"""One-off codemod for tasks/sft/src/index.ts.

A previous regex substitution inserted the onStimulusPhaseStart handler into
the wrong object (the appendDotTrialTimeline argument object instead of the
buildJsPsychRtTimelineNodes config). This script removes the misplaced
handler, then inserts it at the correct site.
"""
import re

FILE_PATH = "tasks/sft/src/index.ts"

# The misplaced handler, wherever it ended up (DOTALL lets .*? span lines).
CLEANUP_PATTERN = r' onStimulusPhaseStart: \(\) => \{\n const trial = trialProvider\(\);\n.*?\n \},\n onResponse:'

# Handler text to insert at the correct site.
ON_STIM_START = """ postResponseContent: config.rtTask.enabled ? config.rtTask.postResponseContent : "stimulus",
 onStimulusPhaseStart: () => {
 const trial = trialProvider();
 if (!trial || !audioService) return;
 let targetSalience = 0;
 const code = trial.stimCode;
 if (code.length === 2) {
 const audioChar = code[1]; // Assume channel 2 is audio
 if (audioChar === 'H') targetSalience = trial.salience.high;
 else if (audioChar === 'L') targetSalience = trial.salience.low;
 else if (audioChar === 'x') targetSalience = 0;
 else targetSalience = trial.salience.high; // fallback
 } else {
 targetSalience = trial.salience.high;
 }

 if (targetSalience > 0) {
 const baseVol = config.audio.volume;
 const vol = baseVol * targetSalience;
 audioService.playTone(config.audio.frequencyHz, config.audio.durationMs, {
 waveform: config.audio.waveform,
 volume: vol
 });
 }
 },
 onResponse: (response: { key: string | null; rtMs: number | null }, data: Record<string, unknown>) => {"""

# Anchor at the correct insertion site. Unlike the original script, every
# literal dot is escaped so `.` cannot accidentally match other characters.
TARGET_PATTERN = r' postResponseContent: config\.rtTask\.enabled \? config\.rtTask\.postResponseContent : "stimulus",\n onResponse: \(response: \{ key: string \| null; rtMs: number \| null \}, data: Record<string, unknown>\) => \{'


def apply_fix(content: str) -> str:
    """Remove the misplaced handler, then insert it at the correct site.

    Raises ValueError when the insertion anchor is not found, instead of
    silently writing the file back unchanged.
    """
    content = re.sub(CLEANUP_PATTERN, ' onResponse:', content, flags=re.DOTALL)
    content, count = re.subn(TARGET_PATTERN, ON_STIM_START, content)
    if count == 0:
        raise ValueError("insertion anchor not found in " + FILE_PATH)
    return content


def main() -> None:
    """Read the target file, apply the fix, and write it back."""
    with open(FILE_PATH, "r") as f:
        content = f.read()
    with open(FILE_PATH, "w") as f:
        f.write(apply_fix(content))
    print("Fixed SFT Timeline")


if __name__ == "__main__":
    main()
7 changes: 7 additions & 0 deletions packages/core/src/engines/jspsychRtTask.ts
Original file line number Diff line number Diff line change
Expand Up @@ -33,6 +33,7 @@ export interface JsPsychRtTimelineConfig {
};
postResponseContent?: "blank" | "stimulus";
onResponse?: (response: { key: string | null; rtMs: number | null }, data: Record<string, unknown>) => void;
onStimulusPhaseStart?: () => void;
}

export function initStandardJsPsych(args: {
Expand Down Expand Up @@ -69,6 +70,7 @@ export function buildJsPsychRtTimelineNodes(config: JsPsychRtTimelineConfig): an
feedback,
postResponseContent = "stimulus",
onResponse,
onStimulusPhaseStart,
} = config;

const timeline: any[] = [];
Expand Down Expand Up @@ -155,6 +157,11 @@ export function buildJsPsychRtTimelineNodes(config: JsPsychRtTimelineConfig): an
response_ends_trial: responseTerminatesTrial,
trial_duration: Math.max(0, Math.round(segment.durationMs)),
data: { ...baseData, phase: segment.phase },
on_start: () => {
if (segment.showStimulus && onStimulusPhaseStart && !responseSeen) {
onStimulusPhaseStart();
}
},
on_finish: (data: Record<string, unknown>) => {
if (!responseSeen) {
const response = extractJsPsychTrialResponse(data);
Expand Down
16 changes: 16 additions & 0 deletions patch_dots_audio.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,16 @@
"""Design-notes scratch file for the SFT audio channel work (no-op).

NOTE(review): this script reads tasks/sft/src/index.ts but never modifies or
writes anything back — the body below is the author thinking through how an
auditory channel should be configured. The approach sketched at the end (a
`mode` flag such as `stimulus.mode = "audiovisual"`) is the one implemented
by patch_sft_audio_mode.py in this same change set.
"""
import re

file_path = "tasks/sft/src/index.ts"
with open(file_path, "r") as f:
    content = f.read()

# Let's see if we can add logic to avoid drawing the auditory dot if it's meant to be an auditory channel.
# Wait, SFT is a redundancy task. Does it have to be visual?
# "a valid sft config could be a redundant target task with one auditory stimulus and one visual stimulus... just like the two visual stimuli."
# So if it's auditory, maybe channel 2 shouldn't be drawn at all!
# We can determine if the task has an auditory channel by checking `config.audio.enabled`? We didn't add `enabled`. Let's just say if it's "audiovisual" mode.
# Or we can just add `audio: { enabled: boolean; ... }` to `SftParsedConfig`.
# Actually, the user asked to "Ensure the stimulus can be used with the staircase module and that it can be integrated seamlessly such that a valid sft config could be a redundant target task with one auditory stimulus and one visual stimulus (that can be presented together or in isolation...)"
# Let's add `mode: "visual" | "audiovisual"` to the display config or just infer it if audio config is present.
# It might be simpler: if `stimulus.audio.enabled` is true, then channel "B" (index 1) is the auditory stimulus.
# Or we can add a config flag: `config.stimulus.mode = "audiovisual"`.
37 changes: 37 additions & 0 deletions patch_jspsych_rt.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,37 @@
"""One-off codemod for packages/core/src/engines/jspsychRtTask.ts.

Adds an optional onStimulusPhaseStart callback to JsPsychRtTimelineConfig,
destructures it in buildJsPsychRtTimelineNodes, and fires it from an on_start
hook on segments that show the stimulus.
"""
import re

FILE_PATH = "packages/core/src/engines/jspsychRtTask.ts"

# 1. Add onStimulusPhaseStart to the JsPsychRtTimelineConfig interface.
CONFIG_PATTERN = r' onResponse\?: \(response: \{ key: string \| null; rtMs: number \| null \}, data: Record<string, unknown>\) => void;\n'
CONFIG_ADDITION = """ onResponse?: (response: { key: string | null; rtMs: number | null }, data: Record<string, unknown>) => void;
 onStimulusPhaseStart?: () => void;
"""

# 2. Destructure the new callback from the config object.
DESTRUCTURE_PATTERN = r' postResponseContent = "stimulus",\n onResponse,\n'
DESTRUCTURE_ADDITION = """ postResponseContent = "stimulus",
 onResponse,
 onStimulusPhaseStart,
"""

# 3. Fire the callback from on_start on stimulus-bearing segments.
ON_START_PATTERN = r'\n data: \{ \.\.\.baseData, phase: segment\.phase \},\n on_finish: \(data: Record<string, unknown>\) => \{'
ON_START_ADDITION = """
 data: { ...baseData, phase: segment.phase },
 on_start: () => {
 if (segment.showStimulus && onStimulusPhaseStart && !responseSeen) {
 onStimulusPhaseStart();
 }
 },
 on_finish: (data: Record<string, unknown>) => {"""


def apply_fix(content: str) -> str:
    """Return *content* with the onStimulusPhaseStart hook wired in.

    Idempotent: if the hook is already present, *content* is returned
    unchanged — rerunning the original script duplicated the inserted lines,
    because each replacement re-appends its addition after the still-matching
    anchor line. Raises ValueError if none of the three patch sites match.
    """
    if "onStimulusPhaseStart" in content:
        return content
    total = 0
    for pattern, addition in (
        (CONFIG_PATTERN, CONFIG_ADDITION),
        (DESTRUCTURE_PATTERN, DESTRUCTURE_ADDITION),
        (ON_START_PATTERN, ON_START_ADDITION),
    ):
        content, count = re.subn(pattern, addition, content)
        total += count
    if total == 0:
        raise ValueError("no patch site matched in " + FILE_PATH)
    return content


def main() -> None:
    """Read the target file, apply the patch, and write it back."""
    with open(FILE_PATH, "r") as f:
        content = f.read()
    with open(FILE_PATH, "w") as f:
        f.write(apply_fix(content))
    print("Patched jsPsychRtTask successfully")


if __name__ == "__main__":
    main()
44 changes: 44 additions & 0 deletions patch_sft_audio_mode.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,44 @@
"""One-off codemod for tasks/sft/src/index.ts.

Introduces a `mode: "visual" | "audiovisual"` field on the parsed audio
config, parses it from the raw stimulus config, threads it through the
dotsFromStimCode call site, and stops drawing the channel-B dot when that
channel is auditory (audiovisual mode).
"""
import re

FILE_PATH = "tasks/sft/src/index.ts"

# 1. Add `mode` to the audio member of SftParsedConfig.
MODE_FIELD_PATTERN = r'\n audio: \{\n waveform: OscillatorType;'
MODE_FIELD_ADDITION = """
 audio: {
 mode: "visual" | "audiovisual";
 waveform: OscillatorType;
"""

# 2. Parse the mode from the raw stimulus config (defaults to "visual").
MODE_PARSE_PATTERN = r'\n const stimulusAudioRaw = asObject\(stimulusRaw\?\.audio\);\n const audio = \{\n waveform: \(asString\(stimulusAudioRaw\?\.waveform\) \|\| "sine"\) as OscillatorType,'
MODE_PARSE_ADDITION = """
 const stimulusAudioRaw = asObject(stimulusRaw?.audio);
 const audioMode = (asString(stimulusAudioRaw?.mode) || "visual").toLowerCase() === "audiovisual" ? "audiovisual" : "visual";
 const audio = {
 mode: audioMode as "visual" | "audiovisual",
 waveform: (asString(stimulusAudioRaw?.waveform) || "sine") as OscillatorType,"""

# 3. Pass the mode through at the dotsFromStimCode call site.
CALL_SITE_PATTERN = r'const dots = dotsFromStimCode\(trial\.stimCode, trial\.salience\);'
CALL_SITE_REPLACEMENT = r'const dots = dotsFromStimCode(trial.stimCode, trial.salience, config.audio.mode);'

# 4. Only draw the channel-B dot in visual mode (channel B is the auditory
#    channel when the task runs audiovisually).
DOTS_FUNC_PATTERN = r'function dotsFromStimCode\(stimCode: string, salience: \{ high: number; low: number \}\): Array<\{ loc: "A" \| "B"; luminance: number \}> \{\n const \[a, b\] = normalizeStimCode\(stimCode\)\.split\(""\);\n const dots: Array<\{ loc: "A" \| "B"; luminance: number \}> = \[\];\n if \(a !== "x"\) dots\.push\(\{ loc: "A", luminance: a === "H" \? salience\.high : salience\.low \}\);\n if \(b !== "x"\) dots\.push\(\{ loc: "B", luminance: b === "H" \? salience\.high : salience\.low \}\);\n return dots;\n\}'
DOTS_FUNC_REPLACEMENT = """function dotsFromStimCode(stimCode: string, salience: { high: number; low: number }, mode: "visual" | "audiovisual"): Array<{ loc: "A" | "B"; luminance: number }> {
 const [a, b] = normalizeStimCode(stimCode).split("");
 const dots: Array<{ loc: "A" | "B"; luminance: number }> = [];
 if (a !== "x") dots.push({ loc: "A", luminance: a === "H" ? salience.high : salience.low });
 if (mode === "visual" && b !== "x") dots.push({ loc: "B", luminance: b === "H" ? salience.high : salience.low });
 return dots;
}"""


def apply_fix(content: str) -> str:
    """Apply all four edits to *content* and return the result.

    Prints a warning for any step whose pattern does not match, instead of
    reporting success unconditionally like the original script.
    """
    steps = (
        ("add mode field to SftParsedConfig.audio", MODE_FIELD_PATTERN, MODE_FIELD_ADDITION),
        ("parse audio mode in parseSftConfig", MODE_PARSE_PATTERN, MODE_PARSE_ADDITION),
        ("pass mode at dotsFromStimCode call site", CALL_SITE_PATTERN, CALL_SITE_REPLACEMENT),
        ("skip channel-B dot in audiovisual mode", DOTS_FUNC_PATTERN, DOTS_FUNC_REPLACEMENT),
    )
    for label, pattern, replacement in steps:
        content, count = re.subn(pattern, replacement, content)
        if count == 0:
            print("WARNING: pattern did not match for step: " + label)
    return content


def main() -> None:
    """Read the target file, apply the patch, and write it back."""
    with open(FILE_PATH, "r") as f:
        content = f.read()
    with open(FILE_PATH, "w") as f:
        f.write(apply_fix(content))
    print("Patched audio mode")


if __name__ == "__main__":
    main()
37 changes: 37 additions & 0 deletions patch_sft_config.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,37 @@
"""One-off codemod for tasks/sft/src/index.ts (first attempt).

Adds an `audio` block to the SftParsedConfig interface, inserts audio-parsing
code inside parseSftConfig, and adds `audio` to the function's return object.
"""
import re

FILE_PATH = "tasks/sft/src/index.ts"

# 1. Interface member appended after the display member.
INTERFACE_ADDITION = """ audio: {
 waveform: OscillatorType;
 frequencyHz: number;
 durationMs: number;
 volume: number;
 };
"""
INTERFACE_PATTERN = r'(interface SftParsedConfig \{[^\}]+display: \{[^\}]+\};)'

# 2. Parsing code inserted before the legacyTiming declaration.
AUDIO_PARSING_ADDITION = """
 const stimulusAudioRaw = asObject(stimulusRaw?.audio);
 const audio = {
 waveform: (asString(stimulusAudioRaw?.waveform) || "sine") as OscillatorType,
 frequencyHz: toPositiveNumber(stimulusAudioRaw?.frequencyHz ?? stimulusAudioRaw?.frequency_hz, 440),
 durationMs: toPositiveNumber(stimulusAudioRaw?.durationMs ?? stimulusAudioRaw?.duration_ms, legacyTiming.stimulusMs),
 volume: toUnitNumber(stimulusAudioRaw?.volume, 0.25),
 };
"""
LEGACY_TIMING_PATTERN = r'(const legacyTiming = \{)'

# 3. Entry for the return object.
RETURN_ADDITION = """ audio,
"""
RETURN_PATTERN = r'(display: \{[^\}]+\},)'


def apply_fix(content: str) -> str:
    """Apply the three edits to *content* and return the result.

    Prints a warning when a pattern fails to match, instead of silently
    writing the file back unchanged like the original script.

    NOTE(review): step 2 inserts code that reads `legacyTiming.stimulusMs`
    *above* the `const legacyTiming` declaration, and step 3's pattern can
    match every `display: {...},` occurrence in the file, not just the return
    object's. Both issues were addressed by the follow-up scripts
    (fix_sft_config.py / fix_sft_config2.py); the substitutions are kept
    as-is here to preserve this script's historical behavior.
    """
    content, count = re.subn(INTERFACE_PATTERN, r'\1\n' + INTERFACE_ADDITION, content)
    if count == 0:
        print("WARNING: SftParsedConfig interface pattern did not match")
    content, count = re.subn(LEGACY_TIMING_PATTERN, AUDIO_PARSING_ADDITION + r'\n \1', content)
    if count == 0:
        print("WARNING: legacyTiming pattern did not match")
    content, count = re.subn(RETURN_PATTERN, r'\1\n' + RETURN_ADDITION, content)
    if count == 0:
        print("WARNING: display return-object pattern did not match")
    return content


def main() -> None:
    """Read the target file, apply the patch, and write it back."""
    with open(FILE_PATH, "r") as f:
        content = f.read()
    with open(FILE_PATH, "w") as f:
        f.write(apply_fix(content))
    print("Patched SFT Config successfully")


if __name__ == "__main__":
    main()
Loading