pytorch s3fd pgd_attack, problem getting grad.data after loss.backward()

Problem description
import torch
from detection.sfd import sfd_detector

def pgd_attack(model, input_data, eps=0.03, alpha=0.01, attack_steps=1, device='cpu'):
    ta = input_data.requires_grad_(True).to(device)
    perturbation = torch.zeros_like(input_data, requires_grad=True).to(device)

    for step in range(attack_steps):
        pred_result = model.detect_from_image((input_data + perturbation).clamp(0, 255))
        pred_box, pred_conf = [], []

        for result in pred_result:
            pred_box.append(result[:4])
            pred_conf.append(result[4])
        
        pred_conf = torch.tensor(pred_conf, dtype=torch.float32, device=device)
        potential_true_index, potential_false_index = classify_index_by_iou(pred_box, ground_truth_box, thresh_IOU=0.3)
        loss = loss_function(pred_conf, potential_true_index, potential_false_index)
        loss.backward()
        grad = perturbation.grad.data

This error occurs: RuntimeError: element 0 of tensors does not require grad and does not have a grad_fn

Related code (sfd_detector.py)

models_urls = {
    's3fd': 'https://www.adrianbulat.com/downloads/python-fan/s3fd-619a316812.pth',
}


class SFDDetector(FaceDetector):
    def __init__(self, device, path_to_detector=None, verbose=False, filter_threshold=0.5):
        super(SFDDetector, self).__init__(device, verbose)

        # Initialise the face detector
        if path_to_detector is None:
            model_weights = load_url(models_urls['s3fd'])
        else:
            model_weights = torch.load(path_to_detector)

        self.fiter_threshold = filter_threshold
        self.face_detector = s3fd()
        self.face_detector.load_state_dict(model_weights)
        self.face_detector.to(device)
        self.face_detector.eval()

    def _filter_bboxes(self, bboxlist):
        if len(bboxlist) > 0:
            keep = nms(bboxlist, 0.3)
            bboxlist = bboxlist[keep, :]
            bboxlist = [x for x in bboxlist if x[-1] > self.fiter_threshold]

        return bboxlist

    # def detect_from_image(self, tensor_or_path):
    def detect_from_image(self, image):        
        # image = self.tensor_or_path_to_ndarray(tensor_or_path)

        # bboxlist = detect(self.face_detector, image, device=self.device)[0]
        bboxlist = batch_detect(self.face_detector, image, device=self.device)[0]

        bboxlist = self._filter_bboxes(bboxlist)

        return bboxlist

    def detect_from_batch(self, tensor):
        bboxlists = batch_detect(self.face_detector, tensor, device=self.device)

        new_bboxlists = []
        for i in range(bboxlists.shape[0]):
            bboxlist = bboxlists[i]
            bboxlist = self._filter_bboxes(bboxlist)
            new_bboxlists.append(bboxlist)

        return new_bboxlists

detect.py

def batch_detect(net, img_batch, device):
    """
    Inputs:
        - img_batch: a torch.Tensor of shape (Batch size, Channels, Height, Width)
    """

    if 'cuda' in device:
        torch.backends.cudnn.benchmark = True

    batch_size = img_batch.size(0)
    img_batch = img_batch.to(device, dtype=torch.float32)

    img_batch = img_batch.flip(-3)  # RGB to BGR
    img_batch = img_batch - torch.tensor([104.0, 117.0, 123.0], device=device).view(1, 3, 1, 1)

    # with torch.no_grad():
    olist = net(img_batch)  # patched uint8_t overflow error

    for i in range(len(olist) // 2):
        olist[i * 2] = F.softmax(olist[i * 2], dim=1)

    olist = [oelem.data.cpu().numpy() for oelem in olist]

    bboxlists = get_predictions(olist, batch_size)
    return bboxlists


def get_predictions(olist, batch_size):
    bboxlists = []
    variances = [0.1, 0.2]
    for i in range(len(olist) // 2):
        ocls, oreg = olist[i * 2], olist[i * 2 + 1]
        stride = 2**(i + 2)    # 4,8,16,32,64,128
        poss = zip(*np.where(ocls[:, 1, :, :] > 0.05))
        for Iindex, hindex, windex in poss:
            axc, ayc = stride / 2 + windex * stride, stride / 2 + hindex * stride
            priors = np.array([[axc / 1.0, ayc / 1.0, stride * 4 / 1.0, stride * 4 / 1.0]])
            score = ocls[:, 1, hindex, windex][:,None]
            loc = oreg[:, :, hindex, windex].copy()
            boxes = decode(loc, priors, variances)
            bboxlists.append(np.concatenate((boxes, score), axis=1))
    
    if len(bboxlists) == 0: # No candidates within given threshold
        bboxlists = np.array([[] for _ in range(batch_size)])
    else:
        bboxlists = np.stack(bboxlists, axis=1)
    return bboxlists

All the helper function code


def calculate_iou(box1, box2):
    x_left = max(box1[0], box2[0])
    y_top = max(box1[1], box2[1])
    x_right = min(box1[2], box2[2])
    y_bottom = min(box1[3], box2[3])

    if x_right < x_left or y_bottom < y_top:
        return 0.0  # no intersection

    intersection_area = (x_right - x_left) * (y_bottom - y_top)
    box1_area = (box1[2] - box1[0]) * (box1[3] - box1[1])
    box2_area = (box2[2] - box2[0]) * (box2[3] - box2[1])
    # union area
    union_area = box1_area + box2_area - intersection_area
    iou = intersection_area / union_area
    return iou

def classify_index_by_iou(pred_box, ground_truth_box, thresh_IOU=0.3):
    potential_true_index = []
    potential_false_index = []

    for i, pred in enumerate(pred_box):
        match_found = False
        for truth in ground_truth_box:
            if calculate_iou(pred, truth) > thresh_IOU:
                potential_true_index.append(i)
                match_found = True
                break
        if not match_found:
            potential_false_index.append(i)
    return potential_true_index, potential_false_index

def loss_function(pred_conf, potential_true_index, potential_false_index, threshold_false_box=1000):
    true_detection_loss = torch.tensor(0.0, dtype=torch.float32, device='cpu')
    false_detection_loss = torch.tensor(0.0, dtype=torch.float32, device='cpu')
    # true_detection_loss=torch.log(1-pred_conf[potential_true_index]).sum()
    # false_detection_loss=torch.log(pred_conf[potential_false_index[:threshold_false_box]]).sum()
    for i in potential_true_index:
        true_detection_loss -= torch.log(1 - pred_conf[i])
    for j in potential_false_index[:threshold_false_box]:
        false_detection_loss -= torch.log(pred_conf[j])
    loss = torch.add(true_detection_loss, false_detection_loss)
    print(loss)
    return loss

I just want to get the grad.data correctly... I really don't know what the problem is. Do I need to change all the numpy functions to PyTorch? I have been stuck on this for a week, please help, thx

pytorch loss-function backwards-compatibility
1 Answer

Before calling loss.backward(), check whether the loss requires grad by printing loss.requires_grad. If it does not, inspect your loss calculation function:

  • Is your for loop ever executed?
  • If it is, does pred_conf[i] require grad? (See the diagnostic sketch below.)
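A minimal diagnostic sketch, using the variable names from the question, just to see where the graph is already broken (instrumentation only, not a fix):

# Print requires_grad / grad_fn at each stage of the attack step.
print(perturbation.requires_grad)          # True: the leaf tensor you want gradients for
print(pred_conf.requires_grad)             # False here: torch.tensor(list) builds a fresh tensor with no history
print(pred_conf.grad_fn)                   # None: nothing upstream can receive gradients
print(loss.requires_grad, loss.grad_fn)    # must be True / a grad_fn before loss.backward() can work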

As far as I can tell, the functions in detect.py convert the tensors to numpy arrays and plain Python values, and that breaks the gradient chain. This should be why your loss does not require grad.
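As a toy, self-contained illustration of the principle (this is not your detector): any round-trip through .numpy() or torch.tensor(...) produces a tensor with no history, while a loss built directly from tensor operations backpropagates fine.

import torch

x = torch.randn(3, requires_grad=True)                     # stands in for the perturbation
scores = torch.sigmoid(2 * x)                               # still attached: scores.grad_fn is not None

broken = torch.tensor(scores.detach().cpu().numpy())        # what the numpy round-trip effectively does
print(broken.requires_grad)                                 # False -> a loss built from this cannot backward()

loss = -torch.log(1 - scores.clamp(max=1 - 1e-6)).sum()     # stays inside autograd
loss.backward()
print(x.grad is not None)                                   # True: gradients reach the leaf

So in practice, yes: the confidence scores would have to stay torch tensors all the way from the network output to the loss, which means avoiding the .data.cpu().numpy() conversion in batch_detect and the torch.tensor(pred_conf) rebuild in pgd_attack.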
