Barcelona AI DXP Demo Endpoint Usage Guide
Base URL: http://3.7.36.230:8181/v1
Semantic Search
Endpoint: /search
Request Object: Simple Search Query
const axios = require('axios');

// Simple semantic search: a bare query, with metadata requested for
// each result. Written with async/await for readability.
const url = "http://base_url/search";
const payload = {
  query: "artificial intelligence applications",
  search_params: {
    return_metadata: true,
  }
};

async function runSearch() {
  try {
    const response = await axios.post(url, payload, {
      headers: {
        "Content-Type": "application/json"
      }
    });
    console.log(response.data);
  } catch (error) {
    console.error(error);
  }
}

runSearch();
Request Object: Media Library
import requests

url = "http://base_url/search"

# Search restricted to the "media" search space; return_metadata asks
# the service to include each document's metadata block in the results.
payload = {
    "query": "artificial intelligence applications",
    "search_space": "media",
    "search_params": {
        "return_metadata": True,
    }
}
headers = {
    "Content-Type": "application/json"
}

# requests.post is the idiomatic shorthand for requests.request("POST", ...).
response = requests.post(url, json=payload, headers=headers)
print(response.text)
const axios = require('axios');

// Search scoped to the "media" search space, with result metadata
// requested. Written with async/await for readability.
const url = "http://base_url/search";
const payload = {
  query: "artificial intelligence applications",
  search_space: "media",
  search_params: {
    return_metadata: true,
  }
};

async function runMediaSearch() {
  try {
    const response = await axios.post(url, payload, {
      headers: {
        "Content-Type": "application/json"
      }
    });
    console.log(response.data);
  } catch (error) {
    console.error(error);
  }
}

runMediaSearch();
Request Object: With additional filters
# NOTE: the request body must be strict JSON — trailing commas
# (after the last array element / object member) are rejected by
# standards-compliant JSON parsers, so none appear below.
curl -X POST "http://base_url/search" -H "Content-Type: application/json" -d '{
  "query": "artificial intelligence applications",
  "search_space": "media",
  "additional_filters": [
    {
      "field": "drupal_content_type",
      "value": "article"
    },
    {
      "field": "drupal_content_type",
      "value": "document"
    }
  ],
  "search_params": {
    "return_metadata": true
  }
}'
import requests

url = "http://base_url/search"

# Search scoped to "media", further restricted to the listed Drupal
# content types; return_metadata includes each document's metadata.
payload = {
    "query": "artificial intelligence applications",
    "search_space": "media",
    "additional_filters": [
        {
            "field": "drupal_content_type",
            "value": "article"
        },
        {
            "field": "drupal_content_type",
            "value": "document"
        },
    ],
    "search_params": {
        "return_metadata": True,
    }
}
headers = {
    "Content-Type": "application/json"
}

# requests.post is the idiomatic shorthand for requests.request("POST", ...).
response = requests.post(url, json=payload, headers=headers)
print(response.text)
const axios = require('axios');

// Search scoped to "media" and filtered to the listed Drupal content
// types, with result metadata requested. Written with async/await.
const url = "http://base_url/search";
const payload = {
  query: "artificial intelligence applications",
  search_space: "media",
  additional_filters: [
    {
      field: "drupal_content_type",
      value: "article"
    },
    {
      field: "drupal_content_type",
      value: "document"
    },
  ],
  search_params: {
    return_metadata: true,
  }
};

async function runFilteredSearch() {
  try {
    const response = await axios.post(url, payload, {
      headers: {
        "Content-Type": "application/json"
      }
    });
    console.log(response.data);
  } catch (error) {
    console.error(error);
  }
}

runFilteredSearch();
Response Object:
{
"results": [
{
"doc_id": "d123",
"content": "This is a sample document about artificial intelligence...",
"score": 1.0,
"metadata": {
"title": "Artificial Intelligence",
"description": "A brief overview of artificial intelligence and its applications...",
"doc_type": "article",
"url": "https://example.com/article/artificial-intelligence"
}
},
{
"doc_id": "b783",
"content": "Machine learning is a subset of AI that focuses on data and algorithms...",
"score": 0.98,
"metadata": {
"title": "Machine Learning",
"description": "An introduction to machine learning and its applications...",
"doc_type": "pdf",
"url": "https://example.com/article/machine-learning.pdf"
}
}
]
}
Suggested Queries | Smart Chips | Snippets
Endpoint: /infer
(infer_type: "smart_chips")
Request Object: Used when there is no chat session or other context — only a search query.
import requests
import json

# The documented base URL already ends in /v1, so the endpoint path is
# just /infer (consistent with the /search examples in this guide).
url = "http://base_url/infer"

# Suggested-query smart chips seeded by a plain search query
# (no chat session, no document context).
payload = {
    "infer_type": "smart_chips",
    "infer_params": {
        "chip_type": "suggested_query",
        "chip_params": {
            "count": 3
        },
        "search": {
            "query": "artificial intelligence applications"
        }
    }
}
headers = {
    "Content-Type": "application/json"
}

# The response is a stream of newline-delimited JSON records; the
# context manager releases the connection even when we break early.
with requests.post(url, json=payload, headers=headers, stream=True) as response:
    for line in response.iter_lines():
        if line:
            data = json.loads(line)
            print(f"Received: {data}")
            if data.get('stream_status') == 'stream_over':
                break
// Base URL already includes /v1, so the endpoint path is just /infer
// (consistent with the /search examples in this guide).
const url = "http://base_url/infer";

// Suggested-query smart chips seeded by a plain search query
// (no chat session, no document context).
const payload = {
  infer_type: "smart_chips",
  infer_params: {
    chip_type: "suggested_query",
    chip_params: {
      count: 3
    },
    search: {
      query: "artificial intelligence applications"
    }
  }
};

fetch(url, {
  method: 'POST',
  headers: {
    'Content-Type': 'application/json'
  },
  body: JSON.stringify(payload)
}).then(response => {
  const reader = response.body.getReader();
  const decoder = new TextDecoder();
  let buffer = '';

  // Emit one parsed newline-delimited JSON record.
  function handleRecord(line) {
    const data = JSON.parse(line);
    console.log('Received:', data);
    if (data.stream_status === 'stream_over') {
      console.log('Stream completed');
    }
  }

  function onError(error) {
    console.error('Error:', error);
  }

  function processChunk({ done, value }) {
    if (done) {
      // Flush the decoder and parse any record still in the buffer —
      // otherwise a final line not terminated by '\n' would be lost.
      buffer += decoder.decode();
      if (buffer.trim() !== '') {
        handleRecord(buffer);
      }
      console.log('Stream complete');
      return;
    }
    buffer += decoder.decode(value, { stream: true });
    const lines = buffer.split('\n');
    buffer = lines.pop(); // keep trailing partial line for next chunk
    lines.forEach(line => {
      if (line.trim() !== '') {
        handleRecord(line);
      }
    });
    // Propagate errors from subsequent reads too (the outer .catch
    // only covers the first chain).
    reader.read().then(processChunk, onError);
  }

  reader.read().then(processChunk, onError);
}).catch(error => {
  console.error('Error:', error);
});
Request Object: Used when there is no chat session and only a document provides context.
import requests
import json

# The documented base URL already ends in /v1, so the endpoint path is
# just /infer (consistent with the /search examples in this guide).
url = "http://base_url/infer"

# Suggested-query smart chips seeded only by a specific document
# (no chat session, no free-text query).
payload = {
    "infer_type": "smart_chips",
    "infer_params": {
        "chip_type": "suggested_query",
        "chip_params": {
            "count": 3
        },
        "search": {
            "search_params": {
                "doc_id": "doc_12345"
            }
        }
    }
}
headers = {
    "Content-Type": "application/json"
}

# The response is a stream of newline-delimited JSON records; the
# context manager releases the connection even when we break early.
with requests.post(url, json=payload, headers=headers, stream=True) as response:
    for line in response.iter_lines():
        if line:
            data = json.loads(line)
            print(f"Received: {data}")
            if data.get('stream_status') == 'stream_over':
                break
// Base URL already includes /v1, so the endpoint path is just /infer
// (consistent with the /search examples in this guide).
const url = "http://base_url/infer";

// Suggested-query smart chips seeded only by a specific document
// (no chat session, no free-text query).
const payload = {
  infer_type: "smart_chips",
  infer_params: {
    chip_type: "suggested_query",
    chip_params: {
      count: 3
    },
    search: {
      search_params: {
        doc_id: "doc_12345"
      }
    }
  }
};

fetch(url, {
  method: 'POST',
  headers: {
    'Content-Type': 'application/json'
  },
  body: JSON.stringify(payload)
}).then(response => {
  const reader = response.body.getReader();
  const decoder = new TextDecoder();
  let buffer = '';

  // Emit one parsed newline-delimited JSON record.
  function handleRecord(line) {
    const data = JSON.parse(line);
    console.log('Received:', data);
    if (data.stream_status === 'stream_over') {
      console.log('Stream completed');
    }
  }

  function onError(error) {
    console.error('Error:', error);
  }

  function processChunk({ done, value }) {
    if (done) {
      // Flush the decoder and parse any record still in the buffer —
      // otherwise a final line not terminated by '\n' would be lost.
      buffer += decoder.decode();
      if (buffer.trim() !== '') {
        handleRecord(buffer);
      }
      console.log('Stream complete');
      return;
    }
    buffer += decoder.decode(value, { stream: true });
    const lines = buffer.split('\n');
    buffer = lines.pop(); // keep trailing partial line for next chunk
    lines.forEach(line => {
      if (line.trim() !== '') {
        handleRecord(line);
      }
    });
    // Propagate errors from subsequent reads too (the outer .catch
    // only covers the first chain).
    reader.read().then(processChunk, onError);
  }

  reader.read().then(processChunk, onError);
}).catch(error => {
  console.error('Error:', error);
});
Request Object: Used in case of an ongoing chat.
import requests
import json

# The documented base URL already ends in /v1, so the endpoint path is
# just /infer (consistent with the /search examples in this guide).
url = "http://base_url/infer"

# Suggested-query smart chips derived from an ongoing chat session.
payload = {
    "infer_type": "smart_chips",
    "infer_params": {
        "chip_type": "suggested_query",
        "chip_params": {
            "count": 3
        },
        "chat_session_id": "session_123"
    }
}
headers = {
    "Content-Type": "application/json"
}

# The response is a stream of newline-delimited JSON records; the
# context manager releases the connection even when we break early.
with requests.post(url, json=payload, headers=headers, stream=True) as response:
    for line in response.iter_lines():
        if line:
            data = json.loads(line)
            print(f"Received: {data}")
            if data.get('stream_status') == 'stream_over':
                break
// Base URL already includes /v1, so the endpoint path is just /infer
// (consistent with the /search examples in this guide).
const url = "http://base_url/infer";

// Suggested-query smart chips derived from an ongoing chat session.
const payload = {
  infer_type: "smart_chips",
  infer_params: {
    chip_type: "suggested_query",
    chip_params: {
      count: 3
    },
    chat_session_id: "session_123"
  }
};

fetch(url, {
  method: 'POST',
  headers: {
    'Content-Type': 'application/json'
  },
  body: JSON.stringify(payload)
}).then(response => {
  const reader = response.body.getReader();
  const decoder = new TextDecoder();
  let buffer = '';

  // Emit one parsed newline-delimited JSON record.
  function handleRecord(line) {
    const data = JSON.parse(line);
    console.log('Received:', data);
    if (data.stream_status === 'stream_over') {
      console.log('Stream completed');
    }
  }

  function onError(error) {
    console.error('Error:', error);
  }

  function processChunk({ done, value }) {
    if (done) {
      // Flush the decoder and parse any record still in the buffer —
      // otherwise a final line not terminated by '\n' would be lost.
      buffer += decoder.decode();
      if (buffer.trim() !== '') {
        handleRecord(buffer);
      }
      console.log('Stream complete');
      return;
    }
    buffer += decoder.decode(value, { stream: true });
    const lines = buffer.split('\n');
    buffer = lines.pop(); // keep trailing partial line for next chunk
    lines.forEach(line => {
      if (line.trim() !== '') {
        handleRecord(line);
      }
    });
    // Propagate errors from subsequent reads too (the outer .catch
    // only covers the first chain).
    reader.read().then(processChunk, onError);
  }

  reader.read().then(processChunk, onError);
}).catch(error => {
  console.error('Error:', error);
});
Request Object: Used in case of an ongoing chat and specific document context.
import requests
import json

# The documented base URL already ends in /v1, so the endpoint path is
# just /infer (consistent with the /search examples in this guide).
url = "http://base_url/infer"

# Suggested-query smart chips derived from an ongoing chat session,
# additionally scoped to a specific document.
payload = {
    "infer_type": "smart_chips",
    "infer_params": {
        "chip_type": "suggested_query",
        "chip_params": {
            "count": 3
        },
        "chat_session_id": "session_123",
        "search": {
            "search_params": {
                "doc_id": "doc_12345"
            }
        }
    }
}
headers = {
    "Content-Type": "application/json"
}

# The response is a stream of newline-delimited JSON records; the
# context manager releases the connection even when we break early.
with requests.post(url, json=payload, headers=headers, stream=True) as response:
    for line in response.iter_lines():
        if line:
            data = json.loads(line)
            print(f"Received: {data}")
            if data.get('stream_status') == 'stream_over':
                break
// Base URL already includes /v1, so the endpoint path is just /infer
// (consistent with the /search examples in this guide).
const url = "http://base_url/infer";

// Suggested-query smart chips derived from an ongoing chat session,
// additionally scoped to a specific document.
const payload = {
  infer_type: "smart_chips",
  infer_params: {
    chip_type: "suggested_query",
    chip_params: {
      count: 3
    },
    chat_session_id: "session_123",
    search: {
      search_params: {
        doc_id: "doc_12345"
      }
    }
  }
};

fetch(url, {
  method: 'POST',
  headers: {
    'Content-Type': 'application/json'
  },
  body: JSON.stringify(payload)
}).then(response => {
  const reader = response.body.getReader();
  const decoder = new TextDecoder();
  let buffer = '';

  // Emit one parsed newline-delimited JSON record.
  function handleRecord(line) {
    const data = JSON.parse(line);
    console.log('Received:', data);
    if (data.stream_status === 'stream_over') {
      console.log('Stream completed');
    }
  }

  function onError(error) {
    console.error('Error:', error);
  }

  function processChunk({ done, value }) {
    if (done) {
      // Flush the decoder and parse any record still in the buffer —
      // otherwise a final line not terminated by '\n' would be lost.
      buffer += decoder.decode();
      if (buffer.trim() !== '') {
        handleRecord(buffer);
      }
      console.log('Stream complete');
      return;
    }
    buffer += decoder.decode(value, { stream: true });
    const lines = buffer.split('\n');
    buffer = lines.pop(); // keep trailing partial line for next chunk
    lines.forEach(line => {
      if (line.trim() !== '') {
        handleRecord(line);
      }
    });
    // Propagate errors from subsequent reads too (the outer .catch
    // only covers the first chain).
    reader.read().then(processChunk, onError);
  }

  reader.read().then(processChunk, onError);
}).catch(error => {
  console.error('Error:', error);
});
Response Object: (per_value streaming)
Received: {'key': 'smart_chip_type', 'value': 'suggested_query', 'stream_status': 'stream_running'}
Received: {'key': 'content', 'value': {'question': 'Suggested Query 1?', 'chunk_text': 'Relevant chunk for suggested query 1.', 'relevance_score': 0.77}, 'stream_status': 'stream_running'}
Received: {'key': 'content', 'value': {'question': 'Suggested Query 2?', 'chunk_text': 'Relevant chunk for suggested query 2.', 'relevance_score': 0.46}, 'stream_status': 'stream_running'}
Received: {'key': 'content', 'value': {'question': 'Suggested Query 3?', 'chunk_text': 'Relevant chunk for suggested query 3.', 'relevance_score': 0.35}, 'stream_status': 'stream_running'}
Received: {'key': '', 'value': '', 'stream_status': 'stream_over'}
Chat Inference
Endpoint: /infer
(infer_type: "chat")
Request Object: In case of no context and on the global site search page
import requests
import json

# The documented base URL already ends in /v1, so the endpoint path is
# just /infer (consistent with the /search examples in this guide).
url = "http://base_url/infer"

# Chat inference on the global site-search page: a user message with
# no prior document context, searching the "global" space.
payload = {
    "infer_type": "chat",
    "infer_params": {
        "chat_session_id": "session_456",
        "user_message": "What are some applications of artificial intelligence?",
        "search": {
            "search_space": "global"
        }
    }
}
headers = {
    "Content-Type": "application/json"
}

# Use a context manager so the streamed connection is released even
# when we break out of the loop early.
with requests.post(url, json=payload, headers=headers, stream=True) as response:
    for line in response.iter_lines():
        if line:
            data = json.loads(line)
            print(f"Received: {data}")
            if data.get('stream_status') == 'stream_over':
                break
// Base URL already includes /v1, so the endpoint path is just /infer
// (consistent with the /search examples in this guide).
const url = "http://base_url/infer";

// Chat inference on the global site-search page: a user message with
// no prior document context, searching the "global" space.
const payload = {
  infer_type: "chat",
  infer_params: {
    chat_session_id: "session_456",
    user_message: "What are some applications of artificial intelligence?",
    search: {
      search_space: "global"
    }
  }
};

fetch(url, {
  method: 'POST',
  headers: {
    'Content-Type': 'application/json'
  },
  body: JSON.stringify(payload)
}).then(response => {
  const reader = response.body.getReader();
  const decoder = new TextDecoder();
  let buffer = '';

  // Emit one parsed newline-delimited JSON record.
  function handleRecord(line) {
    const data = JSON.parse(line);
    console.log('Received:', data);
    if (data.stream_status === 'stream_over') {
      console.log('Stream completed');
    }
  }

  function onError(error) {
    console.error('Error:', error);
  }

  function processChunk({ done, value }) {
    if (done) {
      // Flush the decoder and parse any record still in the buffer —
      // otherwise a final line not terminated by '\n' would be lost.
      buffer += decoder.decode();
      if (buffer.trim() !== '') {
        handleRecord(buffer);
      }
      console.log('Stream complete');
      return;
    }
    buffer += decoder.decode(value, { stream: true });
    const lines = buffer.split('\n');
    buffer = lines.pop(); // keep trailing partial line for next chunk
    lines.forEach(line => {
      if (line.trim() !== '') {
        handleRecord(line);
      }
    });
    // Propagate errors from subsequent reads too (the outer .catch
    // only covers the first chain).
    reader.read().then(processChunk, onError);
  }

  reader.read().then(processChunk, onError);
}).catch(error => {
  console.error('Error:', error);
});
Request Object: In case of an ongoing chat
import requests
import json

# The documented base URL already ends in /v1, so the endpoint path is
# just /infer (consistent with the /search examples in this guide).
url = "http://base_url/infer"

# Chat inference for an ongoing session: only the session id and the
# new user message are required.
payload = {
    "infer_type": "chat",
    "infer_params": {
        "chat_session_id": "session_456",
        "user_message": "What are some applications of artificial intelligence?"
    }
}
headers = {
    "Content-Type": "application/json"
}

# Use a context manager so the streamed connection is released even
# when we break out of the loop early.
with requests.post(url, json=payload, headers=headers, stream=True) as response:
    for line in response.iter_lines():
        if line:
            data = json.loads(line)
            print(f"Received: {data}")
            if data.get('stream_status') == 'stream_over':
                break
// Base URL already includes /v1, so the endpoint path is just /infer
// (consistent with the /search examples in this guide).
const url = "http://base_url/infer";

// Chat inference for an ongoing session: only the session id and the
// new user message are required.
const payload = {
  infer_type: "chat",
  infer_params: {
    chat_session_id: "session_456",
    user_message: "What are some applications of artificial intelligence?"
  }
};

fetch(url, {
  method: 'POST',
  headers: {
    'Content-Type': 'application/json'
  },
  body: JSON.stringify(payload)
}).then(response => {
  const reader = response.body.getReader();
  const decoder = new TextDecoder();
  let buffer = '';

  // Emit one parsed newline-delimited JSON record.
  function handleRecord(line) {
    const data = JSON.parse(line);
    console.log('Received:', data);
    if (data.stream_status === 'stream_over') {
      console.log('Stream completed');
    }
  }

  function onError(error) {
    console.error('Error:', error);
  }

  function processChunk({ done, value }) {
    if (done) {
      // Flush the decoder and parse any record still in the buffer —
      // otherwise a final line not terminated by '\n' would be lost.
      buffer += decoder.decode();
      if (buffer.trim() !== '') {
        handleRecord(buffer);
      }
      console.log('Stream complete');
      return;
    }
    buffer += decoder.decode(value, { stream: true });
    const lines = buffer.split('\n');
    buffer = lines.pop(); // keep trailing partial line for next chunk
    lines.forEach(line => {
      if (line.trim() !== '') {
        handleRecord(line);
      }
    });
    // Propagate errors from subsequent reads too (the outer .catch
    // only covers the first chain).
    reader.read().then(processChunk, onError);
  }

  reader.read().then(processChunk, onError);
}).catch(error => {
  console.error('Error:', error);
});
Request Object: In case of a chat with specific document context.
import requests
import json

# The documented base URL already ends in /v1, so the endpoint path is
# just /infer (consistent with the /search examples in this guide).
url = "http://base_url/infer"

# Chat inference scoped to a specific document: answers are grounded
# in doc_12345 rather than the whole search space.
payload = {
    "infer_type": "chat",
    "infer_params": {
        "chat_session_id": "session_456",
        "user_message": "What are some applications of artificial intelligence?",
        "search": {
            "search_params": {
                "doc_id": "doc_12345"
            }
        }
    }
}
headers = {
    "Content-Type": "application/json"
}

# Use a context manager so the streamed connection is released even
# when we break out of the loop early.
with requests.post(url, json=payload, headers=headers, stream=True) as response:
    for line in response.iter_lines():
        if line:
            data = json.loads(line)
            print(f"Received: {data}")
            if data.get('stream_status') == 'stream_over':
                break
// Base URL already includes /v1, so the endpoint path is just /infer
// (consistent with the /search examples in this guide).
const url = "http://base_url/infer";

// Chat inference scoped to a specific document: answers are grounded
// in doc_12345 rather than the whole search space.
const payload = {
  infer_type: "chat",
  infer_params: {
    chat_session_id: "session_456",
    user_message: "What are some applications of artificial intelligence?",
    search: {
      search_params: {
        doc_id: "doc_12345"
      }
    }
  }
};

fetch(url, {
  method: 'POST',
  headers: {
    'Content-Type': 'application/json'
  },
  body: JSON.stringify(payload)
}).then(response => {
  const reader = response.body.getReader();
  const decoder = new TextDecoder();
  let buffer = '';

  // Emit one parsed newline-delimited JSON record.
  function handleRecord(line) {
    const data = JSON.parse(line);
    console.log('Received:', data);
    if (data.stream_status === 'stream_over') {
      console.log('Stream completed');
    }
  }

  function onError(error) {
    console.error('Error:', error);
  }

  function processChunk({ done, value }) {
    if (done) {
      // Flush the decoder and parse any record still in the buffer —
      // otherwise a final line not terminated by '\n' would be lost.
      buffer += decoder.decode();
      if (buffer.trim() !== '') {
        handleRecord(buffer);
      }
      console.log('Stream complete');
      return;
    }
    buffer += decoder.decode(value, { stream: true });
    const lines = buffer.split('\n');
    buffer = lines.pop(); // keep trailing partial line for next chunk
    lines.forEach(line => {
      if (line.trim() !== '') {
        handleRecord(line);
      }
    });
    // Propagate errors from subsequent reads too (the outer .catch
    // only covers the first chain).
    reader.read().then(processChunk, onError);
  }

  reader.read().then(processChunk, onError);
}).catch(error => {
  console.error('Error:', error);
});
Response Object: Partial Streaming Response
Received: {'key': 'assistant_response', 'value': 'Great ques', 'stream_status': 'stream_running'}
Received: {'key': 'assistant_response', 'value': 'tion! Let ', 'stream_status': 'stream_running'}
... running stream - truncated...
Received: {'key': 'assistant_response', 'value': 'topics and', 'stream_status': 'stream_running'}
Received: {'key': 'assistant_response', 'value': ' applicati', 'stream_status': 'stream_running'}
Received: {'key': 'assistant_response', 'value': 'ons....', 'stream_status': 'stream_running'}
Received: {'key': 'referred_context', 'value': {'doc_id': 'doc_24', 'content': 'This is sample document 24 about various AI topics and applications....', 'score': 0.9890900354435119, 'metadata': {'title': 'Document 24', 'description': 'A sample document about AI topics - 24', 'url': 'https://example.com/doc_24', 'custom_field': 'Custom value 24', 'doc_type': 'document'}}, 'stream_status': 'stream_running'}
Received: {'key': 'referred_context', 'value': {'doc_id': 'doc_45', 'content': 'This is sample document 45 about various AI topics and applications....', 'score': 0.9832366456127151, 'metadata': {'title': 'Document 45', 'description': 'A sample document about AI topics - 45', 'url': 'https://example.com/doc_45', 'custom_field': 'Custom value 45', 'doc_type': 'document'}}, 'stream_status': 'stream_running'}
Received: {'key': '', 'value': '', 'stream_status': 'stream_over'}
`referred_context` is streamed as a single chunk so that it can be displayed as related context beneath the streamed chat response.
- Also contains the metadata of the referred document to show similar to how search results are displayed.