Python loop does not call the function on the second iteration

Problem description

The code below estimates the head pose of the face in a video. I modified it to take a folder/directory as input and expected it to process all the videos in that directory. However, the 'for' loop only processes one video and skips the rest. Here is the loop; parse_video is effectively called only once:

    if args.videoDirPath is not None:
        for videoName in os.listdir(folderName):
            print(videoName)
            video = cv2.VideoCapture(videoName)
            parse_video(video)

The folder (videoFolder) contains the following videos:

amir.mp4
arnab-srk.mp4
kanihya.mp4
simma.mp4
salman.mp4

Output:

opt/anaconda3/lib/python3.7/site-packages/torchvision/transforms/transforms.py:207: UserWarning: The use of the transforms.Scale transform is deprecated, please use transforms.Resize instead.
  warnings.warn("The use of the transforms.Scale transform is deprecated, " +

simma.mp4
frameNumber : 1
amir.mp4
creating...output/frame1.jpg
creating...output/frame2.jpg
creating...output/frame3.jpg
creating...output/frame4.jpg
creating...output/frame5.jpg
frameNumber : 6
arnab-srk.mp4
frameNumber : 6
kanihya.mp4
frameNumber : 6
salman.mp4
frameNumber : 6

The output folder contains the following video and text files:

     output-out-1.avi
     output-out-6.avi
     output-out.txt  # blank

I run the program with the following arguments:

!python code/test_on_video_dlib.py --snapshot hopenet_alpha1.pkl --face_model mmod_human_face_detector.dat --directoryPath videoFolder --output_string out --n_frames 20 --fps 200

The code of 'test_on_video_dlib.py':

  import sys, os, argparse
  import numpy as np
  import cv2
  import matplotlib.pyplot as plt
  import torch
  import torch.nn as nn
  from torch.autograd import Variable
  from torch.utils.data import DataLoader
  from torchvision import transforms
  import torch.backends.cudnn as cudnn
  import torchvision
  import torch.nn.functional as F
  from PIL import Image
  import datasets, hopenet, utils
  from skimage import io
  import dlib
  import face_alignment
  from mpl_toolkits.mplot3d import Axes3D

  def parse_video(video,nr):       

      # New cv2
      width = int(video.get(cv2.CAP_PROP_FRAME_WIDTH))   # float
      height = int(video.get(cv2.CAP_PROP_FRAME_HEIGHT)) # float

      # Define the codec and create VideoWriter object
      fourcc = cv2.VideoWriter_fourcc(*'MJPG')

      out = cv2.VideoWriter('output/video/output-{}-{}.avi'.format(args.output_string, nr), fourcc, 
                  args.fps, (width, height))
      #frame_num = 1

      frame_num = nr # add nr here also

      while frame_num <= args.n_frames:
          #print frame_num
          ret,frame = video.read()
          if ret == False:
              break            
          #writing frames
          name = 'output/frame' + str(frame_num) + '.jpg'
          print("creating..." +name)
          cv2.imwrite(name,frame)        
          cv2_frame = cv2.cvtColor(frame,cv2.COLOR_BGR2RGB)
          # Dlib detect
          dets = cnn_face_detector(cv2_frame, 1)

          for idx, det in enumerate(dets):
            # Get x_min, y_min, x_max, y_max, conf
            x_min = det.rect.left()
            y_min = det.rect.top()
            x_max = det.rect.right()
            y_max = det.rect.bottom()
            conf = det.confidence

            if conf > 1.0:
                bbox_width = abs(x_max - x_min)
                bbox_height = abs(y_max - y_min)
                x_min -= 2 * bbox_width / 4
                x_max += 2 * bbox_width / 4
                y_min -= 3 * bbox_height / 4
                y_max += bbox_height / 4
                x_min = max(x_min, 0); y_min = max(y_min, 0)
                x_max = min(frame.shape[1], x_max); y_max = min(frame.shape[0], y_max)
                # Crop image
                img = cv2_frame[int(y_min):int(y_max),int(x_min):int(x_max)]
                img = Image.fromarray(img)

                # Transform
                img = transformations(img)
                img_shape = img.size()
                img = img.view(1, img_shape[0], img_shape[1], img_shape[2])
                img = Variable(img).cuda(gpu)

                yaw, pitch, roll = model(img)

                yaw_predicted = F.softmax(yaw,dim=1)
                pitch_predicted = F.softmax(pitch,dim=1)
                roll_predicted = F.softmax(roll,dim=1)
                # Get continuous predictions in degrees.
                yaw_predicted = torch.sum(yaw_predicted.data[0] * idx_tensor) * 3 - 99
                pitch_predicted = torch.sum(pitch_predicted.data[0] * idx_tensor) * 3 - 99
                roll_predicted = torch.sum(roll_predicted.data[0] * idx_tensor) * 3 - 99
                txt_out.write(('output/frame' + str(frame_num) + '.jpg') + ' %f %f %f\n' % (yaw_predicted, pitch_predicted,       roll_predicted))
                # utils.plot_pose_cube(frame, yaw_predicted, pitch_predicted, roll_predicted, (x_min + x_max) / 2, (y_min + y_max) / 2, size = bbox_width)
                utils.draw_axis(frame, yaw_predicted, pitch_predicted, roll_predicted, tdx = (x_min + x_max) / 2, tdy= (y_min + y_max) / 2, size = bbox_height/2)
                # Plot expanded bounding box
                # cv2.rectangle(frame, (x_min, y_min), (x_max, y_max), (0,255,0), 1)

          out.write(frame)
          frame_num += 1

      out.release()
      video.release()
      return frame_num

  def parse_args():
      """Parse input arguments."""
      parser = argparse.ArgumentParser(description='Head pose estimation using the Hopenet network.')
      parser.add_argument('--gpu', dest='gpu_id', help='GPU device id to use [0]',
        default=0, type=int)
      parser.add_argument('--snapshot', dest='snapshot', help='Path of model snapshot.',
      default='', type=str)
      parser.add_argument('--face_model', dest='face_model', help='Path of DLIB face detection model.',
      default='', type=str)
      parser.add_argument('--video', dest='video_path', help='Path of video')
      #code to pass video folder name
      parser.add_argument('--directoryPath',dest='videoDirPath' ,help="directory path containing all videos")
      parser.add_argument('--output_string', dest='output_string', help='String appended to output file')
      parser.add_argument('--n_frames', dest='n_frames', help='Number of frames', type=int)
      parser.add_argument('--fps', dest='fps', help='Frames per second of source video', type=float, default=30.)
      args = parser.parse_args()
      return args

  if __name__ == '__main__':
      args = parse_args()
      cudnn.enabled = True

      batch_size = 1
      gpu = args.gpu_id
      snapshot_path = args.snapshot
      out_dir = 'output/video'
      video_path = args.video_path
      #folder path code
      folderName = args.videoDirPath

      if not os.path.exists(out_dir):
          os.makedirs(out_dir)

      # ResNet50 structure
      model = hopenet.Hopenet(torchvision.models.resnet.Bottleneck, [3, 4, 6, 3], 66)

      # Dlib face detection model
      cnn_face_detector = dlib.cnn_face_detection_model_v1(args.face_model)

      #print 'Loading snapshot.'
      # Load snapshot
      saved_state_dict = torch.load(snapshot_path)
      model.load_state_dict(saved_state_dict)

      #print 'Loading data.'

      transformations = transforms.Compose([transforms.Scale(224),
      transforms.CenterCrop(224), transforms.ToTensor(),
      transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])

      model.cuda(gpu)

      #print 'Ready to test network.'

      # Test the Model
      model.eval()  # Change model to 'eval' mode (BN uses moving mean/var).
      total = 0

      idx_tensor = [idx for idx in range(66)]
      idx_tensor = torch.FloatTensor(idx_tensor).cuda(gpu)

      # Text file that parse_video() writes its pose predictions to via txt_out.write()
      txt_out = open('output/video/output-%s.txt' % args.output_string, 'w')

      if args.video_path is not None:
          video = cv2.VideoCapture(video_path)
          parse_video(video, 1)
      # THIS IS THE LOOP I AM REFERRING TO IN THE QUESTION
      nr = 1
      if args.videoDirPath is not None:
          for videoName in os.listdir(folderName):
              print(videoName)
              video = cv2.VideoCapture(videoName)
              nr = parse_video(video, nr)

Expected output:

I expect every video in videoFolder to be processed and its frames to be created in the output folder.

python opencv tensorflow computer-vision dlib
1 Answer

You have to use the correct file path - folderName/videoName - because os.listdir() returns bare file names, not full paths:

for videoName in os.listdir(folderName):

    videoName = os.path.join(folderName, videoName)  

    print(videoName)
    video = cv2.VideoCapture(videoName)
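
For completeness, here is a minimal sketch of how the corrected loop could look in the script's __main__ block. It reuses the names from the question (args, folderName, nr, parse_video); the extension filter and the isOpened() check are extra safeguards added for illustration, not part of the original script:

    import os
    import cv2

    nr = 1
    if args.videoDirPath is not None:
        for videoName in os.listdir(folderName):
            # os.listdir() returns bare file names, so build the full path
            videoPath = os.path.join(folderName, videoName)

            # Skip anything that is not a video file (illustrative filter)
            if not videoName.lower().endswith(('.mp4', '.avi')):
                continue

            video = cv2.VideoCapture(videoPath)
            # VideoCapture does not raise on a bad path; check explicitly
            if not video.isOpened():
                print('could not open', videoPath)
                continue

            print(videoPath)
            nr = parse_video(video, nr)

Note that cv2.VideoCapture() fails silently on a wrong path: video.read() simply returns ret == False, so parse_video() breaks out of its while loop immediately and returns the frame number unchanged, which matches the repeated "frameNumber : 6" lines in the output above.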