mirror of
https://github.com/khoaliber/khoj.git
synced 2026-03-09 21:29:11 +00:00
Use base64 encoded image instead of source URL for persistence
The source URL returned by OpenAI would expire soon. This would make the chat sessions contain inaccessible images/messages if using the OpenAI image URL. Get the base64 encoded image from OpenAI and store it directly in the conversation logs. This resolves the image link expiry issue
This commit is contained in:
@@ -181,7 +181,7 @@
|
|||||||
|
|
||||||
function renderMessageWithReference(message, by, context=null, dt=null, onlineContext=null, intentType=null) {
|
function renderMessageWithReference(message, by, context=null, dt=null, onlineContext=null, intentType=null) {
|
||||||
if (intentType === "text-to-image") {
|
if (intentType === "text-to-image") {
|
||||||
let imageMarkdown = ``;
|
let imageMarkdown = ``;
|
||||||
renderMessage(imageMarkdown, by, dt);
|
renderMessage(imageMarkdown, by, dt);
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
@@ -254,20 +254,11 @@
|
|||||||
md.renderer.rules.image = function(tokens, idx, options, env, self) {
|
md.renderer.rules.image = function(tokens, idx, options, env, self) {
|
||||||
let token = tokens[idx];
|
let token = tokens[idx];
|
||||||
|
|
||||||
// Get image source url. Only render images with src links
|
|
||||||
let srcIndex = token.attrIndex('src');
|
|
||||||
if (srcIndex < 0) { return ''; }
|
|
||||||
let src = token.attrs[srcIndex][1];
|
|
||||||
|
|
||||||
// Wrap the image in a link
|
|
||||||
var aStart = `<a href="${src}" target="_blank">`;
|
|
||||||
var aEnd = '</a>';
|
|
||||||
|
|
||||||
// Add class="text-to-image" to images
|
// Add class="text-to-image" to images
|
||||||
token.attrPush(['class', 'text-to-image']);
|
token.attrPush(['class', 'text-to-image']);
|
||||||
|
|
||||||
// Use the default renderer to render image markdown format
|
// Use the default renderer to render image markdown format
|
||||||
return aStart + self.renderToken(tokens, idx, options) + aEnd;
|
return self.renderToken(tokens, idx, options);
|
||||||
};
|
};
|
||||||
|
|
||||||
// Render markdown
|
// Render markdown
|
||||||
@@ -435,8 +426,8 @@
|
|||||||
if (chunk.startsWith("{") && chunk.endsWith("}")) {
|
if (chunk.startsWith("{") && chunk.endsWith("}")) {
|
||||||
try {
|
try {
|
||||||
const responseAsJson = JSON.parse(chunk);
|
const responseAsJson = JSON.parse(chunk);
|
||||||
if (responseAsJson.imageUrl) {
|
if (responseAsJson.image) {
|
||||||
rawResponse += ``;
|
rawResponse += ``;
|
||||||
}
|
}
|
||||||
if (responseAsJson.detail) {
|
if (responseAsJson.detail) {
|
||||||
rawResponse += responseAsJson.detail;
|
rawResponse += responseAsJson.detail;
|
||||||
|
|||||||
@@ -109,7 +109,7 @@ export class KhojChatModal extends Modal {
|
|||||||
if (!message) {
|
if (!message) {
|
||||||
return;
|
return;
|
||||||
} else if (intentType === "text-to-image") {
|
} else if (intentType === "text-to-image") {
|
||||||
let imageMarkdown = ``;
|
let imageMarkdown = ``;
|
||||||
this.renderMessage(chatEl, imageMarkdown, sender, dt);
|
this.renderMessage(chatEl, imageMarkdown, sender, dt);
|
||||||
return;
|
return;
|
||||||
} else if (!context) {
|
} else if (!context) {
|
||||||
@@ -317,8 +317,8 @@ export class KhojChatModal extends Modal {
|
|||||||
if (responseText.startsWith("{") && responseText.endsWith("}")) {
|
if (responseText.startsWith("{") && responseText.endsWith("}")) {
|
||||||
try {
|
try {
|
||||||
const responseAsJson = JSON.parse(responseText);
|
const responseAsJson = JSON.parse(responseText);
|
||||||
if (responseAsJson.imageUrl) {
|
if (responseAsJson.image) {
|
||||||
responseText = ``;
|
responseText = ``;
|
||||||
} else if (responseAsJson.detail) {
|
} else if (responseAsJson.detail) {
|
||||||
responseText = responseAsJson.detail;
|
responseText = responseAsJson.detail;
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -190,7 +190,7 @@ To get started, just start typing below. You can also type / to see a list of co
|
|||||||
|
|
||||||
function renderMessageWithReference(message, by, context=null, dt=null, onlineContext=null, intentType=null) {
|
function renderMessageWithReference(message, by, context=null, dt=null, onlineContext=null, intentType=null) {
|
||||||
if (intentType === "text-to-image") {
|
if (intentType === "text-to-image") {
|
||||||
let imageMarkdown = ``;
|
let imageMarkdown = ``;
|
||||||
renderMessage(imageMarkdown, by, dt);
|
renderMessage(imageMarkdown, by, dt);
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
@@ -263,20 +263,11 @@ To get started, just start typing below. You can also type / to see a list of co
|
|||||||
md.renderer.rules.image = function(tokens, idx, options, env, self) {
|
md.renderer.rules.image = function(tokens, idx, options, env, self) {
|
||||||
let token = tokens[idx];
|
let token = tokens[idx];
|
||||||
|
|
||||||
// Get image source url. Only render images with src links
|
|
||||||
let srcIndex = token.attrIndex('src');
|
|
||||||
if (srcIndex < 0) { return ''; }
|
|
||||||
let src = token.attrs[srcIndex][1];
|
|
||||||
|
|
||||||
// Wrap the image in a link
|
|
||||||
var aStart = `<a href="${src}" target="_blank">`;
|
|
||||||
var aEnd = '</a>';
|
|
||||||
|
|
||||||
// Add class="text-to-image" to images
|
// Add class="text-to-image" to images
|
||||||
token.attrPush(['class', 'text-to-image']);
|
token.attrPush(['class', 'text-to-image']);
|
||||||
|
|
||||||
// Use the default renderer to render image markdown format
|
// Use the default renderer to render image markdown format
|
||||||
return aStart + self.renderToken(tokens, idx, options) + aEnd;
|
return self.renderToken(tokens, idx, options);
|
||||||
};
|
};
|
||||||
|
|
||||||
// Render markdown
|
// Render markdown
|
||||||
@@ -440,8 +431,8 @@ To get started, just start typing below. You can also type / to see a list of co
|
|||||||
if (chunk.startsWith("{") && chunk.endsWith("}")) {
|
if (chunk.startsWith("{") && chunk.endsWith("}")) {
|
||||||
try {
|
try {
|
||||||
const responseAsJson = JSON.parse(chunk);
|
const responseAsJson = JSON.parse(chunk);
|
||||||
if (responseAsJson.imageUrl) {
|
if (responseAsJson.image) {
|
||||||
rawResponse += ``;
|
rawResponse += ``;
|
||||||
}
|
}
|
||||||
if (responseAsJson.detail) {
|
if (responseAsJson.detail) {
|
||||||
rawResponse += responseAsJson.detail;
|
rawResponse += responseAsJson.detail;
|
||||||
|
|||||||
@@ -706,9 +706,9 @@ async def chat(
|
|||||||
status_code=200,
|
status_code=200,
|
||||||
)
|
)
|
||||||
elif conversation_command == ConversationCommand.Image:
|
elif conversation_command == ConversationCommand.Image:
|
||||||
image_url, status_code = await text_to_image(q)
|
image, status_code = await text_to_image(q)
|
||||||
await sync_to_async(save_to_conversation_log)(q, image_url, user, meta_log, intent_type="text-to-image")
|
await sync_to_async(save_to_conversation_log)(q, image, user, meta_log, intent_type="text-to-image")
|
||||||
content_obj = {"imageUrl": image_url, "intentType": "text-to-image"}
|
content_obj = {"image": image, "intentType": "text-to-image"}
|
||||||
return Response(content=json.dumps(content_obj), media_type="application/json", status_code=status_code)
|
return Response(content=json.dumps(content_obj), media_type="application/json", status_code=status_code)
|
||||||
|
|
||||||
# Get the (streamed) chat response from the LLM of choice.
|
# Get the (streamed) chat response from the LLM of choice.
|
||||||
|
|||||||
@@ -252,7 +252,7 @@ def generate_chat_response(
|
|||||||
|
|
||||||
async def text_to_image(message: str) -> Tuple[Optional[str], int]:
|
async def text_to_image(message: str) -> Tuple[Optional[str], int]:
|
||||||
status_code = 200
|
status_code = 200
|
||||||
image_url = None
|
image = None
|
||||||
|
|
||||||
# Send the audio data to the Whisper API
|
# Send the audio data to the Whisper API
|
||||||
text_to_image_config = await ConversationAdapters.aget_text_to_image_model_config()
|
text_to_image_config = await ConversationAdapters.aget_text_to_image_model_config()
|
||||||
@@ -264,13 +264,13 @@ async def text_to_image(message: str) -> Tuple[Optional[str], int]:
|
|||||||
client = openai.OpenAI(api_key=openai_chat_config.api_key)
|
client = openai.OpenAI(api_key=openai_chat_config.api_key)
|
||||||
text2image_model = text_to_image_config.model_name
|
text2image_model = text_to_image_config.model_name
|
||||||
try:
|
try:
|
||||||
response = client.images.generate(prompt=message, model=text2image_model)
|
response = client.images.generate(prompt=message, model=text2image_model, response_format="b64_json")
|
||||||
image_url = response.data[0].url
|
image = response.data[0].b64_json
|
||||||
except openai.OpenAIError as e:
|
except openai.OpenAIError as e:
|
||||||
logger.error(f"Image Generation failed with {e.http_status}: {e.error}")
|
logger.error(f"Image Generation failed with {e.http_status}: {e.error}")
|
||||||
status_code = 500
|
status_code = 500
|
||||||
|
|
||||||
return image_url, status_code
|
return image, status_code
|
||||||
|
|
||||||
|
|
||||||
class ApiUserRateLimiter:
|
class ApiUserRateLimiter:
|
||||||
|
|||||||
Reference in New Issue
Block a user