1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
//! This module contains the Google Dialogflow Session client
//! that can be used for the streaming_detect_intent operation. This
//! involves streaming. As a result
//! this client is not cloneable.
#![allow(clippy::manual_map)]
use crate::api::grpc::google::cloud::dialogflow::v2beta1::{
    sessions_client::SessionsClient as GrpcSessionsClient, DetectIntentResponse,
    StreamingDetectIntentRequest, StreamingDetectIntentResponse,
};
use crate::common::{get_token, new_grpc_channel, new_interceptor, TokenInterceptor};
use crate::errors::Result;
use async_stream::try_stream;
use futures_core::stream::Stream;
use log::*;
use std::result::Result as StdResult;
use tokio::sync::mpsc;
use tokio_stream::wrappers::ReceiverStream;
use tonic::codegen::InterceptedService;
use tonic::Response as TonicResponse;
use tonic::Status as TonicStatus;
use tonic::{transport::Channel, Streaming};

/// Google Dialogflow sessions client.
/// Used for streaming detect intent API. Is NOT cloneable!
#[derive(Debug)]
pub struct SessionsClient {
    /// internal GRPC dialogflow sessions client
    sessions_client: GrpcSessionsClient<InterceptedService<Channel, TokenInterceptor>>,

    /// channel for sending audio data; handed out to callers via
    /// get_audio_sink/take_audio_sink, dropped to half-close the stream
    audio_sender: Option<mpsc::Sender<StreamingDetectIntentRequest>>,

    /// channel for streaming audio data into GRPC API; taken (consumed)
    /// when streaming_detect_intent is started, so streaming can run only once
    audio_receiver: Option<mpsc::Receiver<StreamingDetectIntentRequest>>,

    /// For channel based streaming this is the internal channel sender
    /// where streaming detect intent results will be sent. Library client is
    /// using respective receiver (see get_streaming_result_receiver) to get
    /// the results.
    result_sender: Option<mpsc::Sender<StreamingDetectIntentResponse>>,
}

impl SessionsClient {
    /// Creates new sessions client using GCP project JSON credentials
    /// This client should be used for asynchronous invocation (streaming_detect_intent)
    /// See https://cloud.google.com/dialogflow/es/docs/how/detect-intent-stream
    pub async fn create(
        google_credentials: impl AsRef<str>,
        // initial configuration request
        streaming_detect_intent_req: StreamingDetectIntentRequest,
        // Capacity of audio sink (tokio channel used by caller to send audio data).
        // If not provided defaults to 1000.
        buffer_size: Option<usize>,
    ) -> Result<Self> {
        let channel = new_grpc_channel(
            "dialogflow.googleapis.com",
            "https://dialogflow.googleapis.com",
            None,
        )
        .await?;

        let token_header_val = get_token(google_credentials)?;

        let sessions_client =
            GrpcSessionsClient::with_interceptor(channel, new_interceptor(token_header_val));

        let (audio_sender, audio_receiver) =
            mpsc::channel::<StreamingDetectIntentRequest>(buffer_size.unwrap_or(1000));

        // The first message on the request stream must be the configuration
        // request; queue it now so the GRPC call consumes it before any audio.
        audio_sender.send(streaming_detect_intent_req).await?;

        Ok(SessionsClient {
            sessions_client,
            audio_sender: Some(audio_sender),
            audio_receiver: Some(audio_receiver),
            result_sender: None,
        })
    }

    /// Returns a clone of the sender that can be used to stream in audio bytes.
    /// The internal sender stays in place, so this may be called repeatedly.
    /// The request stream is closed only after every clone AND the internal
    /// sender are dropped (see take_audio_sink / drop_audio_sink).
    pub fn get_audio_sink(&mut self) -> Option<mpsc::Sender<StreamingDetectIntentRequest>> {
        // Option<Sender> clones the inner sender; None stays None.
        self.audio_sender.clone()
    }

    /// Returns sender that can be used to stream in audio bytes. This method will take
    /// the sender out of the option leaving None in its place. No additional sender
    /// can be retrieved from session client after this call. When sender is dropped respective
    /// stream will be closed.
    pub fn take_audio_sink(&mut self) -> Option<mpsc::Sender<StreamingDetectIntentRequest>> {
        // take() already yields Option; no need to unwrap and rewrap.
        self.audio_sender.take()
    }

    /// Drops audio sender so that respective stream can be closed.
    pub fn drop_audio_sink(&mut self) {
        self.audio_sender.take();
    }

    /// Returns receiver that can be used to receive streaming detect intents results.
    /// Replaces any previously created result channel.
    pub fn get_streaming_result_receiver(
        &mut self,
        // buffer size for tokio channel. If not provided defaults to 1000.
        buffer_size: Option<usize>,
    ) -> mpsc::Receiver<StreamingDetectIntentResponse> {
        let (result_sender, result_receiver) =
            mpsc::channel::<StreamingDetectIntentResponse>(buffer_size.unwrap_or(1000));
        self.result_sender = Some(result_sender);
        result_receiver
    }

    /// Convenience function so that client does not have to create full StreamingDetectIntentRequest
    /// and can just pass audio bytes vector instead.
    #[allow(deprecated)]
    pub fn streaming_request_from_bytes(
        session: String,
        audio_bytes: Vec<u8>,
    ) -> StreamingDetectIntentRequest {
        StreamingDetectIntentRequest {
            session,
            query_params: None,
            query_input: None,
            // setting always to false. This should be set by user
            // in initial streaming config (see create_async)
            single_utterance: false,
            output_audio_config: None,
            output_audio_config_mask: None,
            input_audio: audio_bytes,
        }
    }

    /// Convenience function to check if DetectIntentResponse
    /// represents end of conversation. If so, returns true, otherwise false.
    pub fn is_eoc(response: &DetectIntentResponse) -> bool {
        super::is_eoc(response)
    }

    /// Convenience function to return properly formatted session string
    /// for detect intent call.
    pub fn get_session_string(project_id: &str, session_id: &str) -> String {
        super::get_session_string(project_id, session_id)
    }

    /// IMPORTANT: currently streaming_detect_intent does not work properly
    /// because half-close operation is not implemented. Details here(go example):
    /// https://cloud.google.com/dialogflow/es/docs/how/detect-intent-stream#detect-intent-stream-go
    #[allow(unreachable_code)]
    pub async fn streaming_detect_intent_async_stream(
        &mut self,
    ) -> impl Stream<Item = Result<StreamingDetectIntentResponse>> + '_ {
        try_stream! {
                // yank self.audio_receiver so that we can consume it;
                // this also means streaming can only ever be started once
                if let Some(audio_receiver) = self.audio_receiver.take() {
                    let streaming_recognize_result: StdResult<
                        TonicResponse<Streaming<StreamingDetectIntentResponse>>,
                        TonicStatus,
                    > = self.sessions_client.streaming_detect_intent(ReceiverStream::new(audio_receiver)).await;

                    let mut response_stream: Streaming<StreamingDetectIntentResponse> =
                        streaming_recognize_result?.into_inner();

                    trace!("streaming_detect_intent_async_stream: entering loop");
                    // message() yields Ok(None) when the server closes the stream
                    while let Some(streaming_detect_intent_response) = response_stream.message().await? {
                        yield streaming_detect_intent_response;
                    }
                    trace!("streaming_detect_intent_async_stream: leaving loop");
                }
        }
    }

    /// Initiates bidirectional streaming. This call should be spawned
    /// into separate tokio task. Results can be then retrieved via
    /// channel receiver returned by method get_streaming_result_receiver.
    /// IMPORTANT: currently streaming_detect_intent does not work properly
    /// because half-close operation is not implemented. Details here(go example):
    /// https://cloud.google.com/dialogflow/es/docs/how/detect-intent-stream#detect-intent-stream-go
    pub async fn streaming_detect_intent(&mut self) -> Result<()> {
        // yank self.audio_receiver so that we can consume it;
        // this also means streaming can only ever be started once
        if let Some(audio_receiver) = self.audio_receiver.take() {
            let streaming_recognize_result: StdResult<
                tonic::Response<Streaming<StreamingDetectIntentResponse>>,
                tonic::Status,
            > = self
                .sessions_client
                .streaming_detect_intent(ReceiverStream::new(audio_receiver))
                .await;

            let mut response_stream: Streaming<StreamingDetectIntentResponse> =
                streaming_recognize_result?.into_inner();

            while let Some(streaming_detect_intent_response) = response_stream.message().await? {
                // results are silently discarded if caller never requested
                // a receiver via get_streaming_result_receiver
                if let Some(result_sender) = &self.result_sender {
                    result_sender.send(streaming_detect_intent_response).await?;
                }
            }
        }

        Ok(())
    }
}