# Triton Inference Server HTTP client demo:
# preprocess a JPEG to (1, 3, 160, 160) FP32 and run face-recognition inference.
import numpy as np
import tritonclient.http as httpclient
import cv2  # or use PIL.Image if preferred
from pathlib import Path

# Resolve paths relative to this script so it works from any CWD.
current_file = Path(__file__)
current_dir = current_file.parent.resolve()

# -----------------------------
# Load JPEG and preprocess
# -----------------------------
image_path = current_dir / "shahab.jpg"  # path to your JPEG file

# cv2.imread requires a str path on many OpenCV builds (Path raises TypeError),
# and it returns None instead of raising when the file is missing/unreadable.
img = cv2.imread(str(image_path))  # BGR, shape: (H, W, 3)
if img is None:
    raise FileNotFoundError(f"Could not read image: {image_path}")

img = cv2.resize(img, (160, 160))           # resize to 160x160
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)  # convert to RGB
img = img.astype(np.float32) / 255.0        # normalize to [0, 1]

# Change to NCHW (3, 160, 160) — assumes the model expects channels-first input.
img_chw = np.transpose(img, (2, 0, 1))

# Add batch dim: (1, 3, 160, 160)
input_data = img_chw[np.newaxis, :]

# -----------------------------
# Prepare Triton HTTP client
# -----------------------------
client = httpclient.InferenceServerClient(url="localhost:9000")

# Prepare input tensor; "input" / "FP32" must match the model config.
input_tensor = httpclient.InferInput("input", input_data.shape, "FP32")
input_tensor.set_data_from_numpy(input_data)

# Prepare expected outputs (names must match the model's output config).
output_names = ["embedding", "bbox", "score", "landmarks"]
output_tensors = [httpclient.InferRequestedOutput(name) for name in output_names]

# Send inference request
response = client.infer(
    model_name="face_recognition",
    inputs=[input_tensor],
    outputs=output_tensors
)

# -----------------------------
# Print outputs
# -----------------------------
for name in output_names:
    output = response.as_numpy(name)
    # as_numpy returns None if the server did not include this output.
    if output is None:
        raise RuntimeError(f"Output '{name}' missing from inference response")
    print(f"{name}: shape={output.shape}, dtype={output.dtype}")
    print(output)