diff --git a/src/interface/desktop/chat.html b/src/interface/desktop/chat.html
index cad7971f..6b7fde07 100644
--- a/src/interface/desktop/chat.html
+++ b/src/interface/desktop/chat.html
@@ -675,9 +675,13 @@
.then(response => response.ok ? response.json() : Promise.reject(response))
.then(data => { chatInput.value += data.text; })
.catch(err => {
- err.status == 422
- ? flashStatusInChatInput("⛔️ Configure speech-to-text model on server.")
- : flashStatusInChatInput("⛔️ Failed to transcribe audio")
+ if (err.status === 501) {
+ flashStatusInChatInput("⛔️ Configure speech-to-text model on server.")
+ } else if (err.status === 422) {
+                        flashStatusInChatInput("⛔️ Audio file too large to process.")
+ } else {
+ flashStatusInChatInput("⛔️ Failed to transcribe audio.")
+ }
});
};
diff --git a/src/interface/obsidian/src/chat_modal.ts b/src/interface/obsidian/src/chat_modal.ts
index 145bae50..09f7a181 100644
--- a/src/interface/obsidian/src/chat_modal.ts
+++ b/src/interface/obsidian/src/chat_modal.ts
@@ -410,10 +410,12 @@ export class KhojChatModal extends Modal {
if (response.status === 200) {
console.log(response);
chatInput.value += response.json.text;
- } else if (response.status === 422) {
- throw new Error("⛔️ Failed to transcribe audio");
- } else {
+ } else if (response.status === 501) {
throw new Error("⛔️ Configure speech-to-text model on server.");
+ } else if (response.status === 422) {
+            throw new Error("⛔️ Audio file too large to process.");
+ } else {
+ throw new Error("⛔️ Failed to transcribe audio.");
}
};
diff --git a/src/khoj/interface/web/chat.html b/src/khoj/interface/web/chat.html
index c243ed67..e85759fb 100644
--- a/src/khoj/interface/web/chat.html
+++ b/src/khoj/interface/web/chat.html
@@ -638,9 +638,13 @@ To get started, just start typing below. You can also type / to see a list of co
.then(response => response.ok ? response.json() : Promise.reject(response))
.then(data => { chatInput.value += data.text; })
.catch(err => {
- err.status == 422
- ? flashStatusInChatInput("⛔️ Configure speech-to-text model on server.")
- : flashStatusInChatInput("⛔️ Failed to transcribe audio")
+ if (err.status === 501) {
+ flashStatusInChatInput("⛔️ Configure speech-to-text model on server.")
+ } else if (err.status === 422) {
+                        flashStatusInChatInput("⛔️ Audio file too large to process.")
+ } else {
+ flashStatusInChatInput("⛔️ Failed to transcribe audio.")
+ }
});
};
diff --git a/src/khoj/routers/api.py b/src/khoj/routers/api.py
index d53f023a..4d2c80a2 100644
--- a/src/khoj/routers/api.py
+++ b/src/khoj/routers/api.py
@@ -626,8 +626,8 @@ async def transcribe(request: Request, common: CommonQueryParams, file: UploadFi
speech_to_text_config = await ConversationAdapters.get_speech_to_text_config()
openai_chat_config = await ConversationAdapters.get_openai_chat_config()
if not speech_to_text_config:
- # If the user has not configured a speech to text model, return an unprocessable entity error
- status_code = 422
+        # If the user has not configured a speech-to-text model, return a 501 Not Implemented error
+ status_code = 501
elif openai_chat_config and speech_to_text_config.model_type == ChatModelOptions.ModelType.OPENAI:
api_key = openai_chat_config.api_key
speech2text_model = speech_to_text_config.model_name
diff --git a/src/khoj/routers/helpers.py b/src/khoj/routers/helpers.py
index f34ae815..a780eb20 100644
--- a/src/khoj/routers/helpers.py
+++ b/src/khoj/routers/helpers.py
@@ -258,8 +258,8 @@ async def text_to_image(message: str) -> Tuple[Optional[str], int]:
text_to_image_config = await ConversationAdapters.aget_text_to_image_model_config()
openai_chat_config = await ConversationAdapters.get_openai_chat_config()
if not text_to_image_config:
- # If the user has not configured a text to image model, return an unprocessable entity error
- status_code = 422
+        # If the user has not configured a text-to-image model, return a 501 Not Implemented error
+ status_code = 501
elif openai_chat_config and text_to_image_config.model_type == TextToImageModelConfig.ModelType.OPENAI:
client = openai.OpenAI(api_key=openai_chat_config.api_key)
text2image_model = text_to_image_config.model_name