deepgram.clients.agent.v1.websocket.options

  1# Copyright 2024 Deepgram SDK contributors. All Rights Reserved.
  2# Use of this source code is governed by an MIT license that can be found in the LICENSE file.
  3# SPDX-License-Identifier: MIT
  4
  5from typing import List, Optional, Union, Any, Tuple
  6import logging
  7
  8from dataclasses import dataclass, field
  9from dataclasses_json import config as dataclass_config
 10
 11from deepgram.utils import verboselogs
 12
 13from ...enums import AgentWebSocketEvents
 14from ....common import BaseResponse
 15
 16
 17# ConfigurationSettings
 18
 19
 20@dataclass
 21class Listen(BaseResponse):
 22    """
 23    This class defines any configuration settings for the Listen model.
 24    """
 25
 26    model: Optional[str] = field(
 27        default=None, metadata=dataclass_config(exclude=lambda f: f is None)
 28    )
 29    keyterms: Optional[List[str]] = field(
 30        default=None, metadata=dataclass_config(exclude=lambda f: f is None)
 31    )
 32
 33
 34@dataclass
 35class Speak(BaseResponse):
 36    """
 37    This class defines any configuration settings for the Speak model.
 38    """
 39
 40    model: Optional[str] = field(
 41        default="aura-asteria-en",
 42        metadata=dataclass_config(exclude=lambda f: f is None),
 43    )
 44    provider: Optional[str] = field(
 45        default=None, metadata=dataclass_config(exclude=lambda f: f is None)
 46    )
 47    voice_id: Optional[str] = field(
 48        default=None, metadata=dataclass_config(exclude=lambda f: f is None)
 49    )
 50
 51
 52@dataclass
 53class Header(BaseResponse):
 54    """
 55    This class defines a single key/value pair for a header.
 56    """
 57
 58    key: str
 59    value: str
 60
 61
 62@dataclass
 63class Item(BaseResponse):
 64    """
 65    This class defines a single item in a list of items.
 66    """
 67
 68    type: str
 69    description: str
 70
 71
 72@dataclass
 73class Properties(BaseResponse):
 74    """
 75    This class defines the properties which is just a list of items.
 76    """
 77
 78    item: Item
 79
 80    def __getitem__(self, key):
 81        _dict = self.to_dict()
 82        if "item" in _dict:
 83            _dict["item"] = [Item.from_dict(item) for item in _dict["item"]]
 84        return _dict[key]
 85
 86
 87@dataclass
 88class Parameters(BaseResponse):
 89    """
 90    This class defines the parameters for a function.
 91    """
 92
 93    type: str
 94    properties: Properties
 95    required: List[str]
 96
 97    def __getitem__(self, key):
 98        _dict = self.to_dict()
 99        if "properties" in _dict:
100            _dict["properties"] = _dict["properties"].copy()
101        return _dict[key]
102
103
@dataclass
class Function(BaseResponse):
    """
    A callable function the Think model may invoke, including the HTTP
    endpoint details needed to execute it.
    """

    # Function name exposed to the model.
    name: str
    # Description shown to the model to decide when to call it.
    description: str
    # Endpoint URL to invoke.
    url: str
    # HTTP method for the invocation.
    method: str
    # Optional request headers; excluded from serialization when unset.
    headers: Optional[List[Header]] = field(
        metadata=dataclass_config(exclude=lambda value: value is None),
        default=None,
    )
    # Optional parameter schema; excluded when unset.
    parameters: Optional[Parameters] = field(
        metadata=dataclass_config(exclude=lambda value: value is None),
        default=None,
    )

    def __getitem__(self, key):
        """Dict-style access over the serialized form of this object."""
        as_dict = self.to_dict()
        # NOTE(review): "parameters" is declared as a single Parameters, yet
        # it is iterated as a list here — confirm the intended shape.
        for name, klass in (("parameters", Parameters), ("headers", Header)):
            if name in as_dict:
                as_dict[name] = [klass.from_dict(entry) for entry in as_dict[name]]
        return as_dict[key]
132
133
@dataclass
class Provider(BaseResponse):
    """
    The LLM provider used by the Think model.
    """

    # Provider type identifier; excluded from serialization when unset.
    type: Optional[str] = field(
        metadata=dataclass_config(exclude=lambda value: value is None),
        default=None,
    )
143
144
@dataclass
class Think(BaseResponse):
    """
    Configuration settings for the Think (reasoning/LLM) model.
    """

    # LLM provider configuration.
    provider: Provider = field(default_factory=Provider)
    # Model name; excluded from serialization when unset.
    model: Optional[str] = field(
        metadata=dataclass_config(exclude=lambda value: value is None),
        default=None,
    )
    # System instructions for the model; excluded when unset.
    instructions: Optional[str] = field(
        metadata=dataclass_config(exclude=lambda value: value is None),
        default=None,
    )
    # Functions the model may call; excluded when unset.
    functions: Optional[List[Function]] = field(
        metadata=dataclass_config(exclude=lambda value: value is None),
        default=None,
    )

    def __getitem__(self, key):
        """Dict-style access over the serialized form of this object."""
        as_dict = self.to_dict()
        # NOTE(review): "provider" is a single Provider, yet it is iterated
        # as a list here — confirm the intended shape.
        for name, klass in (("provider", Provider), ("functions", Function)):
            if name in as_dict:
                as_dict[name] = [klass.from_dict(entry) for entry in as_dict[name]]
        return as_dict[key]
173
174
@dataclass
class Agent(BaseResponse):
    """
    Configuration settings for the Agent: its listen (STT), think (LLM),
    and speak (TTS) components.
    """

    # Speech-to-text configuration.
    listen: Listen = field(default_factory=Listen)
    # Reasoning/LLM configuration.
    think: Think = field(default_factory=Think)
    # Text-to-speech configuration.
    speak: Speak = field(default_factory=Speak)

    def __getitem__(self, key):
        """Dict-style access over the serialized form of this object."""
        as_dict = self.to_dict()
        # NOTE(review): each field is a single object, yet it is iterated as
        # a list here — confirm the intended shape.
        for name, klass in (("listen", Listen), ("think", Think), ("speak", Speak)):
            if name in as_dict:
                as_dict[name] = [klass.from_dict(entry) for entry in as_dict[name]]
        return as_dict[key]
194
195
@dataclass
class Input(BaseResponse):
    """
    Configuration settings for the inbound (user) audio stream.
    """

    # Audio encoding of the incoming stream; defaults to raw 16-bit PCM.
    encoding: Optional[str] = field(default="linear16")
    # Sample rate of the incoming stream, in Hz.
    sample_rate: int = field(default=16000)
204
205
@dataclass
class Output(BaseResponse):
    """
    Configuration settings for the outbound (agent) audio stream.
    """

    # Audio encoding of the outgoing stream; defaults to raw 16-bit PCM.
    encoding: Optional[str] = field(default="linear16")
    # Sample rate of the outgoing stream, in Hz.
    sample_rate: Optional[int] = field(default=16000)
    # Bitrate for compressed encodings; excluded from serialization when unset.
    bitrate: Optional[int] = field(
        metadata=dataclass_config(exclude=lambda value: value is None),
        default=None,
    )
    # Container format for the output audio; "none" means raw frames.
    container: Optional[str] = field(default="none")
218
219
@dataclass
class Audio(BaseResponse):
    """
    Configuration settings for the audio streams (input and output).
    """

    # Inbound audio configuration.
    input: Optional[Input] = field(default_factory=Input)
    # Outbound audio configuration.
    output: Optional[Output] = field(default_factory=Output)

    def __getitem__(self, key):
        """Dict-style access over the serialized form of this object."""
        as_dict = self.to_dict()
        # NOTE(review): each field is a single object, yet it is iterated as
        # a list here — confirm the intended shape.
        for name, klass in (("input", Input), ("output", Output)):
            if name in as_dict:
                as_dict[name] = [klass.from_dict(entry) for entry in as_dict[name]]
        return as_dict[key]
236
237
@dataclass
class Context(BaseResponse):
    """
    Conversation context supplied along with the settings configuration.
    """

    # Prior conversation turns as (role, content) pairs; excluded from
    # serialization when unset.
    messages: Optional[List[Tuple[str, str]]] = field(
        metadata=dataclass_config(exclude=lambda value: value is None),
        default=None,
    )
    # Replay flag — presumably asks the server to replay context; confirm
    # semantics against the Agent API documentation.
    replay: Optional[bool] = field(default=False)

    def __getitem__(self, key):
        """Dict-style access over the serialized form of this object."""
        as_dict = self.to_dict()
        if "messages" in as_dict:
            # Shallow-copy so the caller cannot mutate our serialized state.
            as_dict["messages"] = as_dict["messages"].copy()
        return as_dict[key]
254
255
@dataclass
class SettingsConfigurationOptions(BaseResponse):
    """
    Options for the SettingsConfiguration message.

    The client should send a SettingsConfiguration message immediately after
    opening the websocket and before sending any audio.
    """

    # Message type discriminator.
    type: str = str(AgentWebSocketEvents.SettingsConfiguration)
    # Input/output audio configuration.
    audio: Audio = field(default_factory=Audio)
    # Agent (listen/think/speak) configuration.
    agent: Agent = field(default_factory=Agent)
    # Optional conversation context; excluded from serialization when unset.
    context: Optional[Context] = field(
        metadata=dataclass_config(exclude=lambda value: value is None),
        default=None,
    )

    def __getitem__(self, key):
        """Dict-style access over the serialized form of this object."""
        as_dict = self.to_dict()
        # NOTE(review): each nested field is a single object, yet it is
        # iterated as a list here — confirm the intended shape.
        for name, klass in (("audio", Audio), ("agent", Agent), ("context", Context)):
            if name in as_dict:
                as_dict[name] = [klass.from_dict(entry) for entry in as_dict[name]]
        return as_dict[key]

    def check(self):
        """
        Check the options for any deprecated or soon-to-be-deprecated options.

        Currently performs no checks and always returns True.
        """
        logger = verboselogs.VerboseLogger(__name__)
        logger.addHandler(logging.StreamHandler())
        saved_level = logger.level
        logger.setLevel(verboselogs.ERROR)

        # Nothing to validate for this message type at present.

        logger.setLevel(saved_level)

        return True
295
296
297# UpdateInstructions
298
299
@dataclass
class UpdateInstructionsOptions(BaseResponse):
    """
    Options for the UpdateInstructions message.

    The client can send an UpdateInstructions message to give additional
    instructions to the Think model in the middle of a conversation.
    """

    # Message type discriminator.
    type: str = str(AgentWebSocketEvents.UpdateInstructions)
    # The additional instructions for the Think model.
    instructions: str = field(default="")
308
309
310# UpdateSpeak
311
312
@dataclass
class UpdateSpeakOptions(BaseResponse):
    """
    Options for the UpdateSpeak message.

    The client can send an UpdateSpeak message to change the Speak model in
    the middle of a conversation.
    """

    # Message type discriminator.
    type: str = str(AgentWebSocketEvents.UpdateSpeak)
    # The new Speak (TTS) model to switch to.
    model: str = field(default="")
321
322
323# InjectAgentMessage
324
325
@dataclass
class InjectAgentMessageOptions(BaseResponse):
    """
    Options for the InjectAgentMessage message.

    The client can send an InjectAgentMessage to immediately trigger an agent
    statement. If the injection request arrives while the user is speaking, or
    while the server is in the middle of sending audio for an agent response,
    then the request will be ignored and the server will reply with an
    InjectionRefused.
    """

    # Message type discriminator.
    type: str = str(AgentWebSocketEvents.InjectAgentMessage)
    # The statement the agent should speak immediately.
    message: str = field(default="")
334
335
336# Function Call Response
337
338
@dataclass
class FunctionCallResponse(BaseResponse):
    """
    The FunctionCallResponse message is a JSON command that the client should
    reply with every time there is a FunctionCallRequest received.
    """

    # Message type discriminator.
    type: str = "FunctionCallResponse"
    # Identifier of the FunctionCallRequest being answered.
    function_call_id: str = field(default="")
    # Result of the function call, serialized as a string.
    output: str = field(default="")
348
349
350# Agent Keep Alive
351
352
@dataclass
class AgentKeepAlive(BaseResponse):
    """
    The KeepAlive message is a JSON command that you can use to ensure that
    the server does not close the connection.
    """

    # Message type discriminator.
    type: str = "KeepAlive"
@dataclass
class Listen(deepgram.clients.common.v1.shared_response.BaseResponse):
21@dataclass
22class Listen(BaseResponse):
23    """
24    This class defines any configuration settings for the Listen model.
25    """
26
27    model: Optional[str] = field(
28        default=None, metadata=dataclass_config(exclude=lambda f: f is None)
29    )
30    keyterms: Optional[List[str]] = field(
31        default=None, metadata=dataclass_config(exclude=lambda f: f is None)
32    )

This class defines any configuration settings for the Listen model.

Listen(model: Optional[str] = None, keyterms: Optional[List[str]] = None)
model: Optional[str] = None
keyterms: Optional[List[str]] = None
@dataclass
class Speak(deepgram.clients.common.v1.shared_response.BaseResponse):
35@dataclass
36class Speak(BaseResponse):
37    """
38    This class defines any configuration settings for the Speak model.
39    """
40
41    model: Optional[str] = field(
42        default="aura-asteria-en",
43        metadata=dataclass_config(exclude=lambda f: f is None),
44    )
45    provider: Optional[str] = field(
46        default=None, metadata=dataclass_config(exclude=lambda f: f is None)
47    )
48    voice_id: Optional[str] = field(
49        default=None, metadata=dataclass_config(exclude=lambda f: f is None)
50    )

This class defines any configuration settings for the Speak model.

Speak( model: Optional[str] = 'aura-asteria-en', provider: Optional[str] = None, voice_id: Optional[str] = None)
model: Optional[str] = 'aura-asteria-en'
provider: Optional[str] = None
voice_id: Optional[str] = None
@dataclass
class Item(deepgram.clients.common.v1.shared_response.BaseResponse):
63@dataclass
64class Item(BaseResponse):
65    """
66    This class defines a single item in a list of items.
67    """
68
69    type: str
70    description: str

This class defines a single item in a list of items.

Item(type: str, description: str)
type: str
description: str
@dataclass
class Properties(deepgram.clients.common.v1.shared_response.BaseResponse):
73@dataclass
74class Properties(BaseResponse):
75    """
76    This class defines the properties which is just a list of items.
77    """
78
79    item: Item
80
81    def __getitem__(self, key):
82        _dict = self.to_dict()
83        if "item" in _dict:
84            _dict["item"] = [Item.from_dict(item) for item in _dict["item"]]
85        return _dict[key]

This class defines the properties which is just a list of items.

Properties(item: Item)
item: Item
@dataclass
class Parameters(deepgram.clients.common.v1.shared_response.BaseResponse):
 88@dataclass
 89class Parameters(BaseResponse):
 90    """
 91    This class defines the parameters for a function.
 92    """
 93
 94    type: str
 95    properties: Properties
 96    required: List[str]
 97
 98    def __getitem__(self, key):
 99        _dict = self.to_dict()
100        if "properties" in _dict:
101            _dict["properties"] = _dict["properties"].copy()
102        return _dict[key]

This class defines the parameters for a function.

Parameters( type: str, properties: Properties, required: List[str])
type: str
properties: Properties
required: List[str]
@dataclass
class Function(deepgram.clients.common.v1.shared_response.BaseResponse):
105@dataclass
106class Function(BaseResponse):
107    """
108    This class defines a function for the Think model.
109    """
110
111    name: str
112    description: str
113    url: str
114    method: str
115    headers: Optional[List[Header]] = field(
116        default=None, metadata=dataclass_config(exclude=lambda f: f is None)
117    )
118    parameters: Optional[Parameters] = field(
119        default=None, metadata=dataclass_config(exclude=lambda f: f is None)
120    )
121
122    def __getitem__(self, key):
123        _dict = self.to_dict()
124        if "parameters" in _dict:
125            _dict["parameters"] = [
126                Parameters.from_dict(parameters) for parameters in _dict["parameters"]
127            ]
128        if "headers" in _dict:
129            _dict["headers"] = [
130                Header.from_dict(headers) for headers in _dict["headers"]
131            ]
132        return _dict[key]

This class defines a function for the Think model.

Function( name: str, description: str, url: str, method: str, headers: Optional[List[Header]] = None, parameters: Optional[Parameters] = None)
name: str
description: str
url: str
method: str
headers: Optional[List[Header]] = None
parameters: Optional[Parameters] = None
@dataclass
class Provider(deepgram.clients.common.v1.shared_response.BaseResponse):
135@dataclass
136class Provider(BaseResponse):
137    """
138    This class defines the provider for the Think model.
139    """
140
141    type: Optional[str] = field(
142        default=None, metadata=dataclass_config(exclude=lambda f: f is None)
143    )

This class defines the provider for the Think model.

Provider(type: Optional[str] = None)
type: Optional[str] = None
@dataclass
class Think(deepgram.clients.common.v1.shared_response.BaseResponse):
146@dataclass
147class Think(BaseResponse):
148    """
149    This class defines any configuration settings for the Think model.
150    """
151
152    provider: Provider = field(default_factory=Provider)
153    model: Optional[str] = field(
154        default=None, metadata=dataclass_config(exclude=lambda f: f is None)
155    )
156    instructions: Optional[str] = field(
157        default=None, metadata=dataclass_config(exclude=lambda f: f is None)
158    )
159    functions: Optional[List[Function]] = field(
160        default=None, metadata=dataclass_config(exclude=lambda f: f is None)
161    )
162
163    def __getitem__(self, key):
164        _dict = self.to_dict()
165        if "provider" in _dict:
166            _dict["provider"] = [
167                Provider.from_dict(provider) for provider in _dict["provider"]
168            ]
169        if "functions" in _dict:
170            _dict["functions"] = [
171                Function.from_dict(functions) for functions in _dict["functions"]
172            ]
173        return _dict[key]

This class defines any configuration settings for the Think model.

Think( provider: Provider = <factory>, model: Optional[str] = None, instructions: Optional[str] = None, functions: Optional[List[Function]] = None)
provider: Provider
model: Optional[str] = None
instructions: Optional[str] = None
functions: Optional[List[Function]] = None
@dataclass
class Agent(deepgram.clients.common.v1.shared_response.BaseResponse):
176@dataclass
177class Agent(BaseResponse):
178    """
179    This class defines any configuration settings for the Agent model.
180    """
181
182    listen: Listen = field(default_factory=Listen)
183    think: Think = field(default_factory=Think)
184    speak: Speak = field(default_factory=Speak)
185
186    def __getitem__(self, key):
187        _dict = self.to_dict()
188        if "listen" in _dict:
189            _dict["listen"] = [Listen.from_dict(listen) for listen in _dict["listen"]]
190        if "think" in _dict:
191            _dict["think"] = [Think.from_dict(think) for think in _dict["think"]]
192        if "speak" in _dict:
193            _dict["speak"] = [Speak.from_dict(speak) for speak in _dict["speak"]]
194        return _dict[key]

This class defines any configuration settings for the Agent model.

Agent( listen: Listen = <factory>, think: Think = <factory>, speak: Speak = <factory>)
listen: Listen
think: Think
speak: Speak
@dataclass
class Input(deepgram.clients.common.v1.shared_response.BaseResponse):
197@dataclass
198class Input(BaseResponse):
199    """
200    This class defines any configuration settings for the input audio.
201    """
202
203    encoding: Optional[str] = field(default="linear16")
204    sample_rate: int = field(default=16000)

This class defines any configuration settings for the input audio.

Input(encoding: Optional[str] = 'linear16', sample_rate: int = 16000)
encoding: Optional[str] = 'linear16'
sample_rate: int = 16000
@dataclass
class Output(deepgram.clients.common.v1.shared_response.BaseResponse):
207@dataclass
208class Output(BaseResponse):
209    """
210    This class defines any configuration settings for the output audio.
211    """
212
213    encoding: Optional[str] = field(default="linear16")
214    sample_rate: Optional[int] = field(default=16000)
215    bitrate: Optional[int] = field(
216        default=None, metadata=dataclass_config(exclude=lambda f: f is None)
217    )
218    container: Optional[str] = field(default="none")

This class defines any configuration settings for the output audio.

Output( encoding: Optional[str] = 'linear16', sample_rate: Optional[int] = 16000, bitrate: Optional[int] = None, container: Optional[str] = 'none')
encoding: Optional[str] = 'linear16'
sample_rate: Optional[int] = 16000
bitrate: Optional[int] = None
container: Optional[str] = 'none'
@dataclass
class Audio(deepgram.clients.common.v1.shared_response.BaseResponse):
221@dataclass
222class Audio(BaseResponse):
223    """
224    This class defines any configuration settings for the audio.
225    """
226
227    input: Optional[Input] = field(default_factory=Input)
228    output: Optional[Output] = field(default_factory=Output)
229
230    def __getitem__(self, key):
231        _dict = self.to_dict()
232        if "input" in _dict:
233            _dict["input"] = [Input.from_dict(input) for input in _dict["input"]]
234        if "output" in _dict:
235            _dict["output"] = [Output.from_dict(output) for output in _dict["output"]]
236        return _dict[key]

This class defines any configuration settings for the audio.

Audio( input: Optional[Input] = <factory>, output: Optional[Output] = <factory>)
input: Optional[Input]
output: Optional[Output]
@dataclass
class Context(deepgram.clients.common.v1.shared_response.BaseResponse):
239@dataclass
240class Context(BaseResponse):
241    """
242    This class defines any configuration settings for the context.
243    """
244
245    messages: Optional[List[Tuple[str, str]]] = field(
246        default=None, metadata=dataclass_config(exclude=lambda f: f is None)
247    )
248    replay: Optional[bool] = field(default=False)
249
250    def __getitem__(self, key):
251        _dict = self.to_dict()
252        if "messages" in _dict:
253            _dict["messages"] = _dict["messages"].copy()
254        return _dict[key]

This class defines any configuration settings for the context.

Context( messages: Optional[List[Tuple[str, str]]] = None, replay: Optional[bool] = False)
messages: Optional[List[Tuple[str, str]]] = None
replay: Optional[bool] = False
@dataclass
class SettingsConfigurationOptions(deepgram.clients.common.v1.shared_response.BaseResponse):
257@dataclass
258class SettingsConfigurationOptions(BaseResponse):
259    """
260    The client should send a SettingsConfiguration message immediately after opening the websocket and before sending any audio.
261    """
262
263    type: str = str(AgentWebSocketEvents.SettingsConfiguration)
264    audio: Audio = field(default_factory=Audio)
265    agent: Agent = field(default_factory=Agent)
266    context: Optional[Context] = field(
267        default=None, metadata=dataclass_config(exclude=lambda f: f is None)
268    )
269
270    def __getitem__(self, key):
271        _dict = self.to_dict()
272        if "audio" in _dict:
273            _dict["audio"] = [Audio.from_dict(audio) for audio in _dict["audio"]]
274        if "agent" in _dict:
275            _dict["agent"] = [Agent.from_dict(agent) for agent in _dict["agent"]]
276        if "context" in _dict:
277            _dict["context"] = [
278                Context.from_dict(context) for context in _dict["context"]
279            ]
280        return _dict[key]
281
282    def check(self):
283        """
284        Check the options for any deprecated or soon-to-be-deprecated options.
285        """
286        logger = verboselogs.VerboseLogger(__name__)
287        logger.addHandler(logging.StreamHandler())
288        prev = logger.level
289        logger.setLevel(verboselogs.ERROR)
290
291        # do we need to check anything here?
292
293        logger.setLevel(prev)
294
295        return True

The client should send a SettingsConfiguration message immediately after opening the websocket and before sending any audio.

SettingsConfigurationOptions( type: str = 'SettingsConfiguration', audio: Audio = <factory>, agent: Agent = <factory>, context: Optional[Context] = None)
type: str = 'SettingsConfiguration'
audio: Audio
agent: Agent
context: Optional[Context] = None
def check(self):
282    def check(self):
283        """
284        Check the options for any deprecated or soon-to-be-deprecated options.
285        """
286        logger = verboselogs.VerboseLogger(__name__)
287        logger.addHandler(logging.StreamHandler())
288        prev = logger.level
289        logger.setLevel(verboselogs.ERROR)
290
291        # do we need to check anything here?
292
293        logger.setLevel(prev)
294
295        return True

Check the options for any deprecated or soon-to-be-deprecated options.

@dataclass
class UpdateInstructionsOptions(deepgram.clients.common.v1.shared_response.BaseResponse):
301@dataclass
302class UpdateInstructionsOptions(BaseResponse):
303    """
304    The client can send an UpdateInstructions message to give additional instructions to the Think model in the middle of a conversation.
305    """
306
307    type: str = str(AgentWebSocketEvents.UpdateInstructions)
308    instructions: str = field(default="")

The client can send an UpdateInstructions message to give additional instructions to the Think model in the middle of a conversation.

UpdateInstructionsOptions(type: str = 'UpdateInstructions', instructions: str = '')
type: str = 'UpdateInstructions'
instructions: str = ''
@dataclass
class UpdateSpeakOptions(deepgram.clients.common.v1.shared_response.BaseResponse):
314@dataclass
315class UpdateSpeakOptions(BaseResponse):
316    """
317    The client can send an UpdateSpeak message to change the Speak model in the middle of a conversation.
318    """
319
320    type: str = str(AgentWebSocketEvents.UpdateSpeak)
321    model: str = field(default="")

The client can send an UpdateSpeak message to change the Speak model in the middle of a conversation.

UpdateSpeakOptions(type: str = 'UpdateSpeak', model: str = '')
type: str = 'UpdateSpeak'
model: str = ''
@dataclass
class InjectAgentMessageOptions(deepgram.clients.common.v1.shared_response.BaseResponse):
327@dataclass
328class InjectAgentMessageOptions(BaseResponse):
329    """
330    The client can send an InjectAgentMessage to immediately trigger an agent statement. If the injection request arrives while the user is speaking, or while the server is in the middle of sending audio for an agent response, then the request will be ignored and the server will reply with an InjectionRefused.
331    """
332
333    type: str = str(AgentWebSocketEvents.InjectAgentMessage)
334    message: str = field(default="")

The client can send an InjectAgentMessage to immediately trigger an agent statement. If the injection request arrives while the user is speaking, or while the server is in the middle of sending audio for an agent response, then the request will be ignored and the server will reply with an InjectionRefused.

InjectAgentMessageOptions(type: str = 'InjectAgentMessage', message: str = '')
type: str = 'InjectAgentMessage'
message: str = ''
@dataclass
class FunctionCallResponse(deepgram.clients.common.v1.shared_response.BaseResponse):
340@dataclass
341class FunctionCallResponse(BaseResponse):
342    """
 343    The FunctionCallResponse message is a JSON command that the client should reply with every time there is a FunctionCallRequest received.
344    """
345
346    type: str = "FunctionCallResponse"
347    function_call_id: str = field(default="")
348    output: str = field(default="")

The FunctionCallResponse message is a JSON command that the client should reply with every time there is a FunctionCallRequest received.

FunctionCallResponse( type: str = 'FunctionCallResponse', function_call_id: str = '', output: str = '')
type: str = 'FunctionCallResponse'
function_call_id: str = ''
output: str = ''
@dataclass
class AgentKeepAlive(deepgram.clients.common.v1.shared_response.BaseResponse):
354@dataclass
355class AgentKeepAlive(BaseResponse):
356    """
357    The KeepAlive message is a JSON command that you can use to ensure that the server does not close the connection.
358    """
359
360    type: str = "KeepAlive"

The KeepAlive message is a JSON command that you can use to ensure that the server does not close the connection.

AgentKeepAlive(type: str = 'KeepAlive')
type: str = 'KeepAlive'