How to interpret the bounding box response, Google Cloud endpoints



I have trained a custom object detection model with the new Vertex AI in Google Cloud and deployed it to an endpoint. When I test the model in the cloud console, I see perfect bounding boxes drawn on the image. But when I send a request to the endpoint with Python, the bounding boxes I get in the response seem incorrect. Note that I am already multiplying the values by the image width and height.

My code:

import base64
import os

import cv2
from google.cloud import aiplatform
from google.cloud.aiplatform.gapic.schema import predict

# Use raw strings for Windows paths so backslashes are not parsed as escapes
# (a plain "C:\Users\..." is a SyntaxError in Python 3 because of \U).
os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = r"C:\Users\tarunmis\Downloads\first-cascade-315219-ccaaa402f837.json"
IMAGE_PATH = r"C:\Users\tarunmis\Desktop\p2.jpg"
def predict_image_object_detection_sample(
    project: str = "MY STR",
    endpoint_id: str = "MY ID",
    filename: str = IMAGE_PATH,
    location: str = "us-central1",
    api_endpoint: str = "us-central1-aiplatform.googleapis.com",
):
    # The AI Platform services require regional API endpoints.
    client_options = {"api_endpoint": api_endpoint}
    # Initialize client that will be used to create and send requests.
    # This client only needs to be created once, and can be reused for multiple requests.
    client = aiplatform.gapic.PredictionServiceClient(client_options=client_options)
    with open(filename, "rb") as f:
        file_content = f.read()
    # The format of each instance should conform to the deployed model's prediction input schema.
    encoded_content = base64.b64encode(file_content).decode("utf-8")
    instance = predict.instance.ImageObjectDetectionPredictionInstance(
        content=encoded_content,
    ).to_value()
    instances = [instance]
    # See gs://google-cloud-aiplatform/schema/predict/params/image_object_detection_1.0.0.yaml for the format of the parameters.
    parameters = predict.params.ImageObjectDetectionPredictionParams(
        confidence_threshold=0.5, max_predictions=10,
    ).to_value()
    endpoint = client.endpoint_path(
        project=project, location=location, endpoint=endpoint_id
    )
    response = client.predict(
        endpoint=endpoint, instances=instances, parameters=parameters
    )
    print("response")
    print(" deployed_model_id:", response.deployed_model_id)
    # See gs://google-cloud-aiplatform/schema/predict/prediction/image_object_detection.yaml for the format of the predictions.
    predictions = response.predictions
    preds = list()
    print(response)
    for prediction in predictions:
        preds.append(dict(prediction))
    return preds

# [END aiplatform_predict_image_object_detection_sample]
predictions = predict_image_object_detection_sample()
prediction = predictions[0]
image = cv2.imread(IMAGE_PATH, 1)
h, w, c = image.shape
boxes = prediction['bboxes']
confs = prediction["confidences"]
for box, conf in zip(boxes, confs):
    x1 = int(w * box[0])
    y1 = int(h * box[1])
    x2 = int(w * box[2])
    y2 = int(h * box[3])
    if conf > 0.1:
        cv2.circle(image, (x1, y1), 5, (0, 0, 255), cv2.FILLED)
        cv2.circle(image, (x2, y2), 5, (255, 0, 0), cv2.FILLED)
        cv2.rectangle(image, (x1, y1), (x2, y2), (0, 255, 0))
cv2.imshow("img", image)
cv2.waitKey()

The response is:

predictions {
  struct_value {
    fields {
      key: "bboxes"
      value {
        list_value {
          values {
            list_value {
              values {
                number_value: 0.678395331
              }
              values {
                number_value: 0.779298723
              }
              values {
                number_value: 0.645786881
              }
              values {
                number_value: 0.683837295
              }
            }
          }
          values {
            list_value {
              values {
                number_value: 0.18701905
              }
              values {
                number_value: 0.287654519
              }
              values {
                number_value: 0.627796173
              }
              values {
                number_value: 0.669630647
              }
            }
          }
        }
      }
    }
    fields {
      key: "confidences"
      value {
        list_value {
          values {
            number_value: 0.813014865
          }
          values {
            number_value: 0.748636127
          }
        }
      }
    }
    fields {
      key: "displayNames"
      value {
        list_value {
          values {
            string_value: "plate"
          }
          values {
            string_value: "plate"
          }
        }
      }
    }
    fields {
      key: "ids"
      value {
        list_value {
          values {
            string_value: "66451184247898112"
          }
          values {
            string_value: "66451184247898112"
          }
        }
      }
    }
  }
}
deployed_model_id: "1371469231836626944"
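
For reference, the nested struct_value output above is just protobuf's text format for a plain dictionary. Since the code converts each prediction with dict(prediction), the first element of the returned list should flatten to roughly the following (values copied from the response above; a sketch for readability, not additional output):

prediction = {
    "bboxes": [
        [0.678395331, 0.779298723, 0.645786881, 0.683837295],
        [0.18701905, 0.287654519, 0.627796173, 0.669630647],
    ],
    "confidences": [0.813014865, 0.748636127],
    "displayNames": ["plate", "plate"],
    "ids": ["66451184247898112", "66451184247898112"],
}
# If these were [xMin, yMin, xMax, yMax], the first box would have
# xMin (0.678) > xMax (0.646), an impossible rectangle, which is a hint
# that the assumed coordinate order is wrong.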

The answer: the coordinate order is the problem. The bboxes are returned as [xMin, xMax, yMin, yMax], not [xMin, yMin, xMax, yMax], so the second and third values (x2 and y1 in the question's code) need to be swapped. Note that OpenCV's image.shape is (height, width, channels), so h and w keep their original order:

h, w, c = image.shape
...
x1 = int(w * box[0])
x2 = int(w * box[1])
y1 = int(h * box[2])
y2 = int(h * box[3])
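
Putting it together, the question's drawing loop becomes the following (a minimal sketch reusing the question's variable names; the 0.1 confidence filter is kept from the original code):

# Corrected loop: each box is [xMin, xMax, yMin, yMax], normalized to [0, 1].
h, w, c = image.shape  # OpenCV returns (rows, cols, channels)
for box, conf in zip(boxes, confs):
    x1 = int(w * box[0])  # xMin
    x2 = int(w * box[1])  # xMax
    y1 = int(h * box[2])  # yMin
    y2 = int(h * box[3])  # yMax
    if conf > 0.1:
        cv2.rectangle(image, (x1, y1), (x2, y2), (0, 255, 0))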
