// applet.js — Cinnamon panel applet for the Neon AI voice assistant.
//
// Click the panel icon to start/stop recording. Microphone audio is captured
// by a GStreamer subprocess and streamed in 4096-byte chunks over a binary
// WebSocket ("stream" socket). Audio responses arrive on the same stream
// socket and are played back through another GStreamer subprocess. A second
// "node" WebSocket carries JSON control messages. The endpoint base URL and
// auth token come from the applet settings (settings-schema.json).
const Applet = imports.ui.applet;
const Gio = imports.gi.Gio;
const GLib = imports.gi.GLib;
const Lang = imports.lang;
const Mainloop = imports.mainloop;
const Soup = imports.gi.Soup;
const St = imports.gi.St;
const Settings = imports.ui.settings;

function MyApplet(metadata, orientation, panel_height, instance_id) {
    this._init(metadata, orientation, panel_height, instance_id);
}

MyApplet.prototype = {
    __proto__: Applet.IconApplet.prototype,

    /**
     * Bind settings, set the icon, initialize state, and open both sockets.
     *
     * @param {object} metadata - applet metadata (provides uuid and path)
     * @param {number} orientation - panel orientation
     * @param {number} panel_height - panel height in pixels
     * @param {number} instance_id - applet instance identifier
     */
    _init: function(metadata, orientation, panel_height, instance_id) {
        Applet.IconApplet.prototype._init.call(this, orientation, panel_height, instance_id);

        this.metadata = metadata;
        this.settings = new Settings.AppletSettings(this, metadata.uuid, instance_id);

        // Changing either value triggers a reconnect via onSettingsChanged.
        this.settings.bindProperty(
            Settings.BindingDirection.IN,
            "baseUrl",
            "baseUrl",
            this.onSettingsChanged,
            null
        );

        this.settings.bindProperty(
            Settings.BindingDirection.IN,
            "token",
            "token",
            this.onSettingsChanged,
            null
        );

        this._setCustomIcon(metadata.path + "/icon.svg");
        this.set_applet_tooltip(_("Voice Assistant"));

        this._streamSocket = null;              // binary-audio WebSocket
        this._nodeSocket = null;                // JSON control WebSocket
        this._isRecording = false;
        this._recorder = null;                  // Gio.Subprocess capturing the mic
        this._player = null;                    // Gio.Subprocess playing one chunk
        this._audioBuffer = new Uint8Array(0);  // captured audio not yet sent
        this._playbackBuffer = [];              // queued incoming audio chunks

        global.log("[Voice Assistant] Applet initialized");
        this._initSockets();
    },

    // Settings changed: drop any open connections and reconnect with the
    // new baseUrl/token. Sockets are nulled so stale references are never
    // reused while the async reconnect is in flight.
    onSettingsChanged: function() {
        global.log("[Voice Assistant] Settings changed, reinitializing sockets");
        if (this._streamSocket) {
            this._streamSocket.close(Soup.WebsocketCloseCode.NORMAL, null);
            this._streamSocket = null;
        }
        if (this._nodeSocket) {
            this._nodeSocket.close(Soup.WebsocketCloseCode.NORMAL, null);
            this._nodeSocket = null;
        }
        this._initSockets();
    },

    // Display the SVG at iconPath as the applet icon; fall back to a themed
    // microphone icon if the file cannot be loaded.
    _setCustomIcon: function(iconPath) {
        try {
            let file = Gio.File.new_for_path(iconPath);
            let gicon = new Gio.FileIcon({ file: file });
            // This call ensures this._applet_icon exists before its gicon is
            // overridden below (passing a path here is otherwise a no-op name).
            this.set_applet_icon_symbolic_name(iconPath);
            this._applet_icon.gicon = gicon;
        } catch (e) {
            global.logError("[Voice Assistant] Error setting custom icon: " + e.message);
            // Fallback to a default themed icon if there's an error
            this.set_applet_icon_name("microphone-sensitivity-medium");
        }
    },

    // Open the node (JSON control) and stream (binary audio) WebSockets.
    // The two connections are set up in independent try blocks so a failure
    // in one does not prevent the other from being established.
    _initSockets: function() {
        global.log("[Voice Assistant] Initializing WebSockets");
        let maxPayloadSize = 10 * 1024 * 1024; // 10 MB in bytes

        const STREAM_SOCKET_URL = `wss://${this.baseUrl}/node/v1/stream?token=${this.token}`;
        const NODE_SOCKET_URL = `wss://${this.baseUrl}/node/v1?token=${this.token}`;

        // Initialize Node WebSocket
        try {
            let session = new Soup.Session();
            let message = new Soup.Message({
                method: 'GET',
                uri: GLib.Uri.parse(NODE_SOCKET_URL, GLib.UriFlags.NONE)
            });

            session.websocket_connect_async(message, null, null, null, null, (session, result) => {
                try {
                    this._nodeSocket = session.websocket_connect_finish(result);
                    this._nodeSocket.set_max_incoming_payload_size(maxPayloadSize);
                    this._nodeSocket.connect('message', Lang.bind(this, this._onNodeMessage));
                    this._nodeSocket.connect('error', Lang.bind(this, this._onSocketError));
                    global.log("[Voice Assistant] Node WebSocket initialized");
                } catch (e) {
                    global.logError("[Voice Assistant] Error finalizing Node WebSocket: " + e.message);
                }
            });
        } catch (e) {
            global.logError("[Voice Assistant] Error initializing Node WebSocket: " + e.message);
        }

        // Initialize streaming WebSocket
        try {
            let streamSession = new Soup.Session();
            let streamMessage = new Soup.Message({
                method: 'GET',
                uri: GLib.Uri.parse(STREAM_SOCKET_URL, GLib.UriFlags.NONE)
            });

            streamSession.websocket_connect_async(streamMessage, null, null, null, null, (streamSession, streamResult) => {
                try {
                    this._streamSocket = streamSession.websocket_connect_finish(streamResult);
                    // Audio responses can be large; match the node socket's limit.
                    this._streamSocket.set_max_incoming_payload_size(maxPayloadSize);
                    this._streamSocket.connect('message', Lang.bind(this, this._onStreamMessage));
                    this._streamSocket.connect('error', Lang.bind(this, this._onSocketError));
                    global.log("[Voice Assistant] Stream WebSocket initialized");
                } catch (e) {
                    global.logError("[Voice Assistant] Error finalizing stream WebSocket: " + e.message);
                }
            });
        } catch (e) {
            global.logError("[Voice Assistant] Error initializing stream WebSocket: " + e.message);
        }
    },

    // Any socket error: close whatever is still open, drop the references,
    // and reconnect both sockets.
    _onSocketError: function(socket, error) {
        global.logError("[Voice Assistant] WebSocket error: " + error.message);
        try {
            if (this._streamSocket) {
                this._streamSocket.close(Soup.WebsocketCloseCode.NORMAL, null);
            }
            if (this._nodeSocket) {
                this._nodeSocket.close(Soup.WebsocketCloseCode.NORMAL, null);
            }
        } catch (e) {
            global.logError("[Voice Assistant] Error closing sockets: " + e.message);
        } finally {
            this._streamSocket = null;
            this._nodeSocket = null;
            this._initSockets();
        }
    },

    // Node socket handler: parses JSON text frames and logs their type.
    _onNodeMessage: function(connection, type, message) {
        try {
            if (type === Soup.WebsocketDataType.TEXT) {
                let data = message.get_data();
                let jsonData = JSON.parse(data);
                global.log("[Voice Assistant] Parsed node message: " + jsonData.type);
                // Handle text messages if needed
            } else {
                global.log("[Voice Assistant] Received unknown data type from node: " + type);
            }
        } catch (e) {
            global.logError("[Voice Assistant] Error handling node message: " + e.message);
        }
    },

    // Stream socket handler: queues binary audio frames and kicks playback.
    _onStreamMessage: function(connection, type, message) {
        try {
            if (type === Soup.WebsocketDataType.BINARY) {
                global.log("[Voice Assistant] Received binary audio data of length: " + message.get_data().length);
                this._playbackBuffer.push(message.get_data());
                this._playAudio();
            } else {
                global.log("[Voice Assistant] Received unknown data type from stream: " + type);
            }
        } catch (e) {
            global.logError("[Voice Assistant] Error handling stream message: " + e.message);
        }
    },

    // Play the next queued audio chunk, if any. Each chunk is written to a
    // temp file and decoded/played by a gst-launch-1.0 subprocess; when that
    // subprocess exits, the temp file is deleted and the next chunk plays.
    // Re-entrant-safe: if a player is already running this is a no-op and the
    // chunk stays queued until the current player's wait_async completes.
    _playAudio: function() {
        if (this._player) {
            // A player is already running; the new data stays in the queue.
            return;
        }

        if (this._playbackBuffer.length === 0) {
            return;
        }

        let audioData = this._playbackBuffer.shift();

        // Create a temporary file to store the audio data
        let [file, stream] = Gio.File.new_tmp("voice-assistant-XXXXXX");
        stream.output_stream.write_all(audioData, null);
        stream.close(null);

        // Play the audio using GStreamer
        this._player = new Gio.Subprocess({
            argv: [
                'gst-launch-1.0',
                'filesrc', 'location=' + file.get_path(),
                '!', 'decodebin',
                '!', 'audioconvert',
                '!', 'audioresample',
                '!', 'autoaudiosink'
            ],
            flags: Gio.SubprocessFlags.NONE
        });

        this._player.init(null);

        // Clean up when playback is finished
        this._player.wait_async(null, (source, result) => {
            try {
                source.wait_finish(result);
            } catch (e) {
                global.logError("[Voice Assistant] Error during audio playback: " + e.message);
            } finally {
                this._player = null;
                file.delete(null);
                // Play the next audio chunk if available
                this._playAudio();
            }
        });
    },

    // Start capturing microphone audio (16 kHz mono S16LE via pulsesrc) and
    // stream it to the server. No-op if a recording is already in progress.
    _startRecording: function() {
        if (this._isRecording) return;

        this._isRecording = true;
        this._setCustomIcon(this.metadata.path + "/icon-active.svg");
        global.log("[Voice Assistant] Starting recording");

        try {
            // GStreamer pipeline writes raw PCM to its stdout (fdsink).
            this._recorder = new Gio.Subprocess({
                argv: [
                    'gst-launch-1.0',
                    'pulsesrc',
                    '!',
                    'audioconvert',
                    '!',
                    'audio/x-raw,format=S16LE,channels=1,rate=16000',
                    '!',
                    'fdsink'
                ],
                flags: Gio.SubprocessFlags.STDOUT_PIPE
            });

            this._recorder.init(null);
            global.log("[Voice Assistant] Recording subprocess initialized");

            // Read audio data and send it over the stream WebSocket.
            let stdout = this._recorder.get_stdout_pipe();
            this._readAudioData(stdout);
        } catch (e) {
            global.logError("[Voice Assistant] Error starting recording: " + e.message);
        }
    },

    // Asynchronously read PCM from the recorder's stdout, accumulate it, and
    // send it to the server in fixed 4096-byte chunks. Recurses until the
    // pipe reports EOF (recorder exited), then stops the recording.
    _readAudioData: function(stdout) {
        stdout.read_bytes_async(4096, GLib.PRIORITY_DEFAULT, null, (source, result) => {
            try {
                let bytes = source.read_bytes_finish(result);
                if (bytes && bytes.get_size() > 0) {
                    // Append new data to the existing buffer
                    let newData = new Uint8Array(bytes.get_data());
                    let combinedBuffer = new Uint8Array(this._audioBuffer.length + newData.length);
                    combinedBuffer.set(this._audioBuffer);
                    combinedBuffer.set(newData, this._audioBuffer.length);
                    this._audioBuffer = combinedBuffer;

                    // Send full 4096-byte chunks; keep any remainder buffered.
                    while (this._audioBuffer.length >= 4096) {
                        let chunkToSend = this._audioBuffer.slice(0, 4096);
                        // Guard: the socket may not be connected (yet, or after
                        // an error); buffered audio is kept until it is.
                        if (this._streamSocket) {
                            this._streamSocket.send_binary(chunkToSend);
                            this._audioBuffer = this._audioBuffer.slice(4096);
                        } else {
                            break;
                        }
                    }

                    // Continue reading
                    this._readAudioData(stdout);
                } else {
                    // EOF: the recorder pipeline exited. Remaining buffered
                    // audio (< 4096 bytes) is intentionally discarded.
                    global.log("[Voice Assistant] End of audio stream reached");
                    this._stopRecording();
                }
            } catch (e) {
                global.logError("[Voice Assistant] Error reading audio data: " + e.message);
                this._stopRecording();
            }
        });
    },

    // Stop capturing: terminate the recorder subprocess, restore the idle
    // icon, and drop any unsent buffered audio. No-op if not recording.
    _stopRecording: function() {
        if (!this._isRecording) return;

        this._isRecording = false;
        this._setCustomIcon(this.metadata.path + "/icon.svg");
        global.log("[Voice Assistant] Stopping recording");

        if (this._recorder) {
            this._recorder.force_exit();
            this._recorder = null;
            global.log("[Voice Assistant] Recording subprocess terminated");
        }
        // Clear the audio buffer
        this._audioBuffer = new Uint8Array(0);
    },

    // Panel icon click toggles recording.
    on_applet_clicked: function() {
        if (this._isRecording) {
            global.log("[Voice Assistant] Applet clicked: stopping recording");
            this._stopRecording();
        } else {
            global.log("[Voice Assistant] Applet clicked: starting recording");
            this._startRecording();
        }
    },

    // Teardown: stop recording, close both sockets, kill any playback
    // subprocess, and drop queued playback chunks.
    on_applet_removed_from_panel: function() {
        global.log("[Voice Assistant] Applet removed from panel");
        this._stopRecording();
        if (this._streamSocket) {
            this._streamSocket.close(Soup.WebsocketCloseCode.NORMAL, null);
            this._streamSocket = null;
            global.log("[Voice Assistant] Record WebSocket closed");
        }
        if (this._nodeSocket) {
            this._nodeSocket.close(Soup.WebsocketCloseCode.NORMAL, null);
            this._nodeSocket = null;
            global.log("[Voice Assistant] Node WebSocket closed");
        }
        if (this._player) {
            this._player.force_exit();
            this._player = null;
            global.log("[Voice Assistant] Audio player terminated");
        }
        this._playbackBuffer = [];
    }
};

// Cinnamon applet entry point.
function main(metadata, orientation, panel_height, instance_id) {
    global.log("[Voice Assistant] Main function called");
    return new MyApplet(metadata, orientation, panel_height, instance_id);
}
// applet.js.noconfig — variant of applet.js with no AppletSettings support;
// the HANA endpoint URLs (including the auth token) are hard-coded below.
//
// FIXME(security): the URLs below embed a JWT whose payload contains a
// username/password. Never commit a live credential to source control —
// rotate this token and prefer the settings-backed applet.js.
const Applet = imports.ui.applet;
const Gio = imports.gi.Gio;
const GLib = imports.gi.GLib;
const Lang = imports.lang;
const Mainloop = imports.mainloop;
const Soup = imports.gi.Soup;
const St = imports.gi.St;

const STREAM_SOCKET_URL = 'wss://hana.neonaialpha.com/node/v1/stream?token=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJjbGllbnRfaWQiOiIwNjQ1N2FlMC1lMjQ0LTQxNjMtOWQ1NS0xNWFhMGNiYTQ5NDYiLCJ1c2VybmFtZSI6Im5lb24iLCJwYXNzd29yZCI6Im5lb24iLCJwZXJtaXNzaW9ucyI6eyJhc3Npc3QiOnRydWUsImJhY2tlbmQiOnRydWUsIm5vZGUiOnRydWV9LCJleHBpcmUiOjE3MjgxMDEzOTUuMjE3MTMyM30.BO3ymPLDg2v8epVxdnaf0iLh9DJnVSTZT_hM1M--V84';
const NODE_SOCKET_URL = 'wss://hana.neonaialpha.com/node/v1?token=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJjbGllbnRfaWQiOiIwNjQ1N2FlMC1lMjQ0LTQxNjMtOWQ1NS0xNWFhMGNiYTQ5NDYiLCJ1c2VybmFtZSI6Im5lb24iLCJwYXNzd29yZCI6Im5lb24iLCJwZXJtaXNzaW9ucyI6eyJhc3Npc3QiOnRydWUsImJhY2tlbmQiOnRydWUsIm5vZGUiOnRydWV9LCJleHBpcmUiOjE3MjgxMDEzOTUuMjE3MTMyM30.BO3ymPLDg2v8epVxdnaf0iLh9DJnVSTZT_hM1M--V84';

function MyApplet(metadata, orientation, panel_height, instance_id) {
    this._init(metadata, orientation, panel_height, instance_id);
}

MyApplet.prototype = {
    __proto__: Applet.IconApplet.prototype,

    // Set the icon, initialize state, and open both WebSockets.
    _init: function(metadata, orientation, panel_height, instance_id) {
        Applet.IconApplet.prototype._init.call(this, orientation, panel_height, instance_id);

        this.metadata = metadata;
        // Set the custom SVG icon
        this._setCustomIcon(metadata.path + "/icon.svg");
        this.set_applet_tooltip(_("Voice Assistant"));

        this._streamSocket = null;              // binary-audio WebSocket
        this._nodeSocket = null;                // JSON control WebSocket
        this._isRecording = false;
        this._recorder = null;                  // Gio.Subprocess capturing the mic
        this._player = null;                    // Gio.Subprocess playing one chunk
        this._audioBuffer = new Uint8Array(0);  // captured audio not yet sent
        this._playbackBuffer = [];              // queued incoming audio chunks

        global.log("[Voice Assistant] Applet initialized");
        this._initSockets();
    },

    // Display the SVG at iconPath as the applet icon; fall back to a themed
    // microphone icon if the file cannot be loaded.
    _setCustomIcon: function(iconPath) {
        try {
            let file = Gio.File.new_for_path(iconPath);
            let gicon = new Gio.FileIcon({ file: file });
            // This call ensures this._applet_icon exists before its gicon is
            // overridden below (passing a path here is otherwise a no-op name).
            this.set_applet_icon_symbolic_name(iconPath);
            this._applet_icon.gicon = gicon;
        } catch (e) {
            global.logError("[Voice Assistant] Error setting custom icon: " + e.message);
            // Fallback to a default themed icon if there's an error
            this.set_applet_icon_name("microphone-sensitivity-medium");
        }
    },

    // Open the node (JSON control) and stream (binary audio) WebSockets.
    // The two connections are set up in independent try blocks so a failure
    // in one does not prevent the other from being established.
    _initSockets: function() {
        global.log("[Voice Assistant] Initializing WebSockets");
        let maxPayloadSize = 10 * 1024 * 1024; // 10 MB in bytes

        // Initialize Node WebSocket
        try {
            let session = new Soup.Session();
            let message = new Soup.Message({
                method: 'GET',
                uri: GLib.Uri.parse(NODE_SOCKET_URL, GLib.UriFlags.NONE)
            });

            session.websocket_connect_async(message, null, null, null, null, (session, result) => {
                try {
                    this._nodeSocket = session.websocket_connect_finish(result);
                    // Set the maximum incoming payload size
                    this._nodeSocket.set_max_incoming_payload_size(maxPayloadSize);
                    this._nodeSocket.connect('message', Lang.bind(this, this._onNodeMessage));
                    this._nodeSocket.connect('error', Lang.bind(this, this._onSocketError));
                    global.log("[Voice Assistant] Node WebSocket initialized");
                } catch (e) {
                    global.logError("[Voice Assistant] Error finalizing Node WebSocket: " + e.message);
                }
            });
        } catch (e) {
            global.logError("[Voice Assistant] Error initializing Node WebSocket: " + e.message);
        }

        // Initialize streaming WebSocket
        try {
            let streamSession = new Soup.Session();
            let streamMessage = new Soup.Message({
                method: 'GET',
                uri: GLib.Uri.parse(STREAM_SOCKET_URL, GLib.UriFlags.NONE)
            });

            streamSession.websocket_connect_async(streamSession === null ? null : streamMessage, null, null, null, null, (streamSession, streamResult) => {
                try {
                    this._streamSocket = streamSession.websocket_connect_finish(streamResult);
                    // Audio responses can be large; match the node socket's limit.
                    this._streamSocket.set_max_incoming_payload_size(maxPayloadSize);
                    this._streamSocket.connect('message', Lang.bind(this, this._onStreamMessage));
                    this._streamSocket.connect('error', Lang.bind(this, this._onSocketError));
                    global.log("[Voice Assistant] Stream WebSocket initialized");
                } catch (e) {
                    global.logError("[Voice Assistant] Error finalizing stream WebSocket: " + e.message);
                }
            });
        } catch (e) {
            global.logError("[Voice Assistant] Error initializing stream WebSocket: " + e.message);
        }
    },

    // Any socket error: close whatever is still open, drop the references,
    // and reconnect both sockets.
    _onSocketError: function(socket, error) {
        global.logError("[Voice Assistant] WebSocket error: " + error.message);
        try {
            if (this._streamSocket) {
                this._streamSocket.close(Soup.WebsocketCloseCode.NORMAL, null);
            }
            if (this._nodeSocket) {
                this._nodeSocket.close(Soup.WebsocketCloseCode.NORMAL, null);
            }
        } catch (e) {
            global.logError("[Voice Assistant] Error closing sockets: " + e.message);
        } finally {
            this._streamSocket = null;
            this._nodeSocket = null;
            this._initSockets();
        }
    },

    // Node socket handler: parses JSON text frames and logs their type.
    _onNodeMessage: function(connection, type, message) {
        try {
            if (type === Soup.WebsocketDataType.TEXT) {
                let data = message.get_data();
                let jsonData = JSON.parse(data);
                global.log("[Voice Assistant] Parsed node message: " + jsonData.type);
                // Handle text messages if needed
            } else {
                global.log("[Voice Assistant] Received unknown data type from node: " + type);
            }
        } catch (e) {
            global.logError("[Voice Assistant] Error handling node message: " + e.message);
        }
    },

    // Stream socket handler: queues binary audio frames and kicks playback.
    _onStreamMessage: function(connection, type, message) {
        try {
            if (type === Soup.WebsocketDataType.BINARY) {
                global.log("[Voice Assistant] Received binary audio data of length: " + message.get_data().length);
                this._playbackBuffer.push(message.get_data());
                this._playAudio();
            } else {
                global.log("[Voice Assistant] Received unknown data type from stream: " + type);
            }
        } catch (e) {
            global.logError("[Voice Assistant] Error handling stream message: " + e.message);
        }
    },

    // Play the next queued audio chunk, if any. Each chunk is written to a
    // temp file and decoded/played by a gst-launch-1.0 subprocess; when that
    // subprocess exits, the temp file is deleted and the next chunk plays.
    _playAudio: function() {
        if (this._player) {
            // A player is already running; the new data stays in the queue.
            return;
        }

        if (this._playbackBuffer.length === 0) {
            return;
        }

        let audioData = this._playbackBuffer.shift();

        // Create a temporary file to store the audio data
        let [file, stream] = Gio.File.new_tmp("voice-assistant-XXXXXX");
        stream.output_stream.write_all(audioData, null);
        stream.close(null);

        // Play the audio using GStreamer
        this._player = new Gio.Subprocess({
            argv: [
                'gst-launch-1.0',
                'filesrc', 'location=' + file.get_path(),
                '!', 'decodebin',
                '!', 'audioconvert',
                '!', 'audioresample',
                '!', 'autoaudiosink'
            ],
            flags: Gio.SubprocessFlags.NONE
        });

        this._player.init(null);

        // Clean up when playback is finished
        this._player.wait_async(null, (source, result) => {
            try {
                source.wait_finish(result);
            } catch (e) {
                global.logError("[Voice Assistant] Error during audio playback: " + e.message);
            } finally {
                this._player = null;
                file.delete(null);
                // Play the next audio chunk if available
                this._playAudio();
            }
        });
    },

    // Start capturing microphone audio (16 kHz mono S16LE via pulsesrc) and
    // stream it to the server. No-op if a recording is already in progress.
    _startRecording: function() {
        if (this._isRecording) return;

        this._isRecording = true;
        this._setCustomIcon(this.metadata.path + "/icon-active.svg");
        global.log("[Voice Assistant] Starting recording");

        try {
            // GStreamer pipeline writes raw PCM to its stdout (fdsink).
            this._recorder = new Gio.Subprocess({
                argv: [
                    'gst-launch-1.0',
                    'pulsesrc',
                    '!',
                    'audioconvert',
                    '!',
                    'audio/x-raw,format=S16LE,channels=1,rate=16000',
                    '!',
                    'fdsink'
                ],
                flags: Gio.SubprocessFlags.STDOUT_PIPE
            });

            this._recorder.init(null);
            global.log("[Voice Assistant] Recording subprocess initialized");

            // Read audio data and send it over the stream WebSocket.
            let stdout = this._recorder.get_stdout_pipe();
            this._readAudioData(stdout);
        } catch (e) {
            global.logError("[Voice Assistant] Error starting recording: " + e.message);
        }
    },

    // Asynchronously read PCM from the recorder's stdout, accumulate it, and
    // send it to the server in fixed 4096-byte chunks. Recurses until EOF.
    _readAudioData: function(stdout) {
        stdout.read_bytes_async(4096, GLib.PRIORITY_DEFAULT, null, (source, result) => {
            try {
                let bytes = source.read_bytes_finish(result);
                if (bytes && bytes.get_size() > 0) {
                    // Append new data to the existing buffer
                    let newData = new Uint8Array(bytes.get_data());
                    let combinedBuffer = new Uint8Array(this._audioBuffer.length + newData.length);
                    combinedBuffer.set(this._audioBuffer);
                    combinedBuffer.set(newData, this._audioBuffer.length);
                    this._audioBuffer = combinedBuffer;

                    // Send full 4096-byte chunks; keep any remainder buffered.
                    while (this._audioBuffer.length >= 4096) {
                        let chunkToSend = this._audioBuffer.slice(0, 4096);
                        // Guard: the socket may not be connected (yet, or after
                        // an error); buffered audio is kept until it is.
                        if (this._streamSocket) {
                            this._streamSocket.send_binary(chunkToSend);
                            this._audioBuffer = this._audioBuffer.slice(4096);
                        } else {
                            break;
                        }
                    }

                    // Continue reading
                    this._readAudioData(stdout);
                } else {
                    // EOF: the recorder pipeline exited. Remaining buffered
                    // audio (< 4096 bytes) is intentionally discarded.
                    global.log("[Voice Assistant] End of audio stream reached");
                    this._stopRecording();
                }
            } catch (e) {
                global.logError("[Voice Assistant] Error reading audio data: " + e.message);
                this._stopRecording();
            }
        });
    },

    // Stop capturing: terminate the recorder subprocess, restore the idle
    // icon, and drop any unsent buffered audio. No-op if not recording.
    _stopRecording: function() {
        if (!this._isRecording) return;

        this._isRecording = false;
        this._setCustomIcon(this.metadata.path + "/icon.svg");
        global.log("[Voice Assistant] Stopping recording");

        if (this._recorder) {
            this._recorder.force_exit();
            this._recorder = null;
            global.log("[Voice Assistant] Recording subprocess terminated");
        }
        // Clear the audio buffer
        this._audioBuffer = new Uint8Array(0);
    },

    // Panel icon click toggles recording.
    on_applet_clicked: function() {
        if (this._isRecording) {
            global.log("[Voice Assistant] Applet clicked: stopping recording");
            this._stopRecording();
        } else {
            global.log("[Voice Assistant] Applet clicked: starting recording");
            this._startRecording();
        }
    },

    // Teardown: stop recording, close both sockets, kill any playback
    // subprocess, and drop queued playback chunks.
    on_applet_removed_from_panel: function() {
        global.log("[Voice Assistant] Applet removed from panel");
        this._stopRecording();
        if (this._streamSocket) {
            this._streamSocket.close(Soup.WebsocketCloseCode.NORMAL, null);
            this._streamSocket = null;
            global.log("[Voice Assistant] Record WebSocket closed");
        }
        if (this._nodeSocket) {
            this._nodeSocket.close(Soup.WebsocketCloseCode.NORMAL, null);
            this._nodeSocket = null;
            global.log("[Voice Assistant] Node WebSocket closed");
        }
        if (this._player) {
            this._player.force_exit();
            this._player = null;
            global.log("[Voice Assistant] Audio player terminated");
        }
        this._playbackBuffer = [];
    }
};

// Cinnamon applet entry point.
function main(metadata, orientation, panel_height, instance_id) {
    global.log("[Voice Assistant] Main function called");
    return new MyApplet(metadata, orientation, panel_height, instance_id);
}
Uint8Array(0); + }, + + on_applet_clicked: function() { + if (this._isRecording) { + global.log("[Voice Assistant] Applet clicked: stopping recording"); + this._stopRecording(); + } else { + global.log("[Voice Assistant] Applet clicked: starting recording"); + this._startRecording(); + } + }, + + on_applet_removed_from_panel: function() { + global.log("[Voice Assistant] Applet removed from panel"); + this._stopRecording(); + if (this._streamSocket) { + this._streamSocket.close(Soup.WebsocketCloseCode.NORMAL, null); + global.log("[Voice Assistant] Record WebSocket closed"); + } + if (this._nodeSocket) { + this._nodeSocket.close(Soup.WebsocketCloseCode.NORMAL, null); + global.log("[Voice Assistant] Node WebSocket closed"); + } + if (this._player) { + this._player.force_exit(); + global.log("[Voice Assistant] Audio player terminated"); + } + } +}; + +function main(metadata, orientation, panel_height, instance_id) { + global.log("[Voice Assistant] Main function called"); + return new MyApplet(metadata, orientation, panel_height, instance_id); +} diff --git a/icon-active.svg b/icon-active.svg new file mode 100644 index 0000000..9b90d18 --- /dev/null +++ b/icon-active.svg @@ -0,0 +1,5 @@ + + + + + diff --git a/icon.svg b/icon.svg new file mode 100644 index 0000000..31a0a42 --- /dev/null +++ b/icon.svg @@ -0,0 +1,5 @@ + + + + + diff --git a/metadata.json b/metadata.json new file mode 100644 index 0000000..762c805 --- /dev/null +++ b/metadata.json @@ -0,0 +1,7 @@ +{ + "uuid": "neon-client@mcknight.tech", + "name": "Neon AI Client", + "description": "Interact with Neon AI via a HANA endpoint", + "icon": "microphone-sensitivity-medium", + "settings-schema": "settings-schema" +} diff --git a/settings-schema.json b/settings-schema.json new file mode 100644 index 0000000..8e94ff2 --- /dev/null +++ b/settings-schema.json @@ -0,0 +1,12 @@ +{ + "baseUrl": { + "type": "entry", + "default": "hana.neonaialpha.com", + "description": "Base URL for HANA Websocket" + }, + "token": { + 
"type": "entry", + "default": "", + "description": "Valid token for authentication" + } +}