| title | category | tags | difficulty | description | demonstrates |
|---|---|---|---|---|---|
| Uninterruptable Agent | basics | | beginner | Agent configured to complete responses without user interruptions | |
This example configures an agent to finish speaking even if the user talks over it by disabling interruptions. The agent also seeds the first user input so you can test the behavior immediately.
- Add a `.env` file in this directory with your LiveKit credentials:

  ```
  LIVEKIT_URL=your_livekit_url
  LIVEKIT_API_KEY=your_api_key
  LIVEKIT_API_SECRET=your_api_secret
  ```

- Install dependencies:
pip install "livekit-agents[silero]" python-dotenv
Load environment variables so the audio plugins can authenticate. Create an AgentServer to manage sessions.
from dotenv import load_dotenv
from livekit.agents import JobContext, JobProcess, AgentServer, cli, Agent, AgentSession, inference
from livekit.plugins import silero
load_dotenv()
server = AgentServer()

Preload the VAD model once per process to reduce connection latency.
def prewarm(proc: JobProcess):
proc.userdata["vad"] = silero.VAD.load()
server.setup_fnc = prewarm

Set `allow_interruptions=False` when constructing the agent. The agent class is lightweight—only the instructions and the interruption setting are defined here.
class UninterruptableAgent(Agent):
def __init__(self) -> None:
super().__init__(
instructions="""
You are a helpful assistant communicating through voice who is not interruptable.
""",
allow_interruptions=False
)
async def on_enter(self):
self.session.generate_reply(user_input="Say something somewhat long and boring so I can test if you're interruptable.")

Create an AgentSession with STT/LLM/TTS/VAD configured, start the session with the agent, and connect to the room.
@server.rtc_session()
async def entrypoint(ctx: JobContext):
ctx.log_context_fields = {"room": ctx.room.name}
session = AgentSession(
stt=inference.STT(model="deepgram/nova-3-general"),
llm=inference.LLM(model="openai/gpt-4.1-mini"),
tts=inference.TTS(model="cartesia/sonic-3", voice="9626c31c-bec5-4cca-baa8-f8ba9e84c8bc"),
vad=ctx.proc.userdata["vad"],
preemptive_generation=True,
)
await session.start(agent=UninterruptableAgent(), room=ctx.room)
await ctx.connect()

Run the example in console mode:

    python uninterruptable.py console

- `allow_interruptions=False` keeps TTS playback intact even if new speech arrives.
- `on_enter` seeds a first prompt so you can test the behavior without speaking first.
- The rest of the media pipeline remains unchanged from a standard agent.
- This setting is useful when you want to ensure an announcement completes before listening again.
from dotenv import load_dotenv
from livekit.agents import JobContext, JobProcess, AgentServer, cli, Agent, AgentSession, inference
from livekit.plugins import silero

# Load LiveKit credentials (LIVEKIT_URL / LIVEKIT_API_KEY / LIVEKIT_API_SECRET)
# from a local .env file so the inference plugins can authenticate.
load_dotenv()
class UninterruptableAgent(Agent):
    """Voice assistant that always finishes speaking.

    Interruptions are disabled at construction time, so TTS playback runs to
    completion even if the user talks over the agent.
    """

    def __init__(self) -> None:
        # allow_interruptions=False is the whole point of this example: the
        # session will not cut off the agent's speech when user audio arrives.
        super().__init__(
            allow_interruptions=False,
            instructions="""
            You are a helpful assistant communicating through voice who is not interruptable.
            """,
        )

    async def on_enter(self):
        # Seed an initial reply so the behavior can be tested immediately,
        # without the user having to speak first.
        self.session.generate_reply(user_input="Say something somewhat long and boring so I can test if you're interruptable.")
server = AgentServer()

# Preload the Silero VAD model once per worker process so each new session
# can reuse it instead of paying the model-load cost on every connection.
def prewarm(proc: JobProcess):
    proc.userdata["vad"] = silero.VAD.load()

# Registered as the server's setup hook; runs before any session is handled.
server.setup_fnc = prewarm
@server.rtc_session()
async def entrypoint(ctx: JobContext):
    """Per-job entrypoint: assemble the media pipeline, start the agent, connect."""
    # Tag every log line emitted by this job with the room it belongs to.
    ctx.log_context_fields = {"room": ctx.room.name}

    # Reuse the VAD instance preloaded by prewarm() for this process.
    vad = ctx.proc.userdata["vad"]

    voice_session = AgentSession(
        stt=inference.STT(model="deepgram/nova-3-general"),
        llm=inference.LLM(model="openai/gpt-4.1-mini"),
        tts=inference.TTS(model="cartesia/sonic-3", voice="9626c31c-bec5-4cca-baa8-f8ba9e84c8bc"),
        vad=vad,
        # Start LLM generation before end-of-turn is finalized to cut latency.
        preemptive_generation=True,
    )

    await voice_session.start(agent=UninterruptableAgent(), room=ctx.room)
    await ctx.connect()
if __name__ == "__main__":
    # Hand control to the LiveKit CLI (e.g. `python uninterruptable.py console`).
    cli.run_app(server)