Update image_processing_deformable_detr.py (#28561)

* Update image_processing_deformable_detr.py
* Changes after running `make fix-copies`
This commit is contained in:
parent
e201864bcb
commit
a35ea570a8
|
@@ -1414,13 +1414,14 @@ class ConditionalDetrImageProcessor(BaseImageProcessor):
         boxes = torch.gather(boxes, 1, topk_boxes.unsqueeze(-1).repeat(1, 1, 4))

         # and from relative [0, 1] to absolute [0, height] coordinates
-        if isinstance(target_sizes, List):
-            img_h = torch.Tensor([i[0] for i in target_sizes])
-            img_w = torch.Tensor([i[1] for i in target_sizes])
-        else:
-            img_h, img_w = target_sizes.unbind(1)
-        scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1).to(boxes.device)
-        boxes = boxes * scale_fct[:, None, :]
+        if target_sizes is not None:
+            if isinstance(target_sizes, List):
+                img_h = torch.Tensor([i[0] for i in target_sizes])
+                img_w = torch.Tensor([i[1] for i in target_sizes])
+            else:
+                img_h, img_w = target_sizes.unbind(1)
+            scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1).to(boxes.device)
+            boxes = boxes * scale_fct[:, None, :]

         results = []
         for s, l, b in zip(scores, labels, boxes):
|
|
@@ -1411,13 +1411,14 @@ class DeformableDetrImageProcessor(BaseImageProcessor):
         boxes = torch.gather(boxes, 1, topk_boxes.unsqueeze(-1).repeat(1, 1, 4))

         # and from relative [0, 1] to absolute [0, height] coordinates
-        if isinstance(target_sizes, List):
-            img_h = torch.Tensor([i[0] for i in target_sizes])
-            img_w = torch.Tensor([i[1] for i in target_sizes])
-        else:
-            img_h, img_w = target_sizes.unbind(1)
-        scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1).to(boxes.device)
-        boxes = boxes * scale_fct[:, None, :]
+        if target_sizes is not None:
+            if isinstance(target_sizes, List):
+                img_h = torch.Tensor([i[0] for i in target_sizes])
+                img_w = torch.Tensor([i[1] for i in target_sizes])
+            else:
+                img_h, img_w = target_sizes.unbind(1)
+            scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1).to(boxes.device)
+            boxes = boxes * scale_fct[:, None, :]

         results = []
         for s, l, b in zip(scores, labels, boxes):
Loading…
Reference in New Issue