从加拿大法律 HTML 网页中提取物种

问题描述 投票:0回答:1

我有这段代码来尝试从此处找到的法律中提取物种：https://laws.justice.gc.ca/fra/lois/S-15.3/TexteComplet.html

但是,我无法让 html_nodes 找到每个部分

  section <- div_content %>% html_nodes(xpath = paste0("//h2[contains(text(), '", header, "')]/following-sibling::div[contains(@class, 'ProvisionList')]"))

基本上，我找不到一种方法来获取文本内容并匹配其他部分。我尝试添加“<br>”标签并查找每个部分的文本，但它不起作用（得到

{xml_nodeset (0)}）

我试图获取在 id 为“425426”的 div 中找到的数据,然后在 ScheduleLabel 中从 ScheduleTitleText 获取文本。我需要另一列 SchedHeadL1(这是包含物种的部分的标题)和 BilingualGroupTitleText 中找到的文本(说明动物或植物组...)。然后提供一个嵌套的物种列表(这里我将物种与法语名称、拉丁语和英语分开)

library(rvest)
library(dplyr)
library(stringr)
library(tidyr)  # needed for unite(); was missing in the original script

# URL of the webpage (French full text of the Species at Risk Act)
url <- "https://laws.justice.gc.ca/fra/lois/S-15.3/TexteComplet.html"

# Read the webpage content once
webpage <- read_html(url)

# Extract the div with id "425426" (the schedule holding the species lists)
div_content <- webpage %>% html_node("#425426")

# Schedule title text, e.g. "ANNEXE ..." -- taken from the scheduleLabel h2
schedule_label <- div_content %>%
  html_node("h2.scheduleLabel#h-425427") %>%
  html_text()

# All part headers ("PARTIE 1 ...", "PARTIE 2 ...", ...)
headers <- div_content %>% html_nodes("h2.SchedHeadL1") %>% html_text()

# Split each header into its "PARTIE #" label and the remaining description
partie_numbers <- str_extract(headers, "PARTIE \\d+")
descriptions <- str_remove(headers, "PARTIE \\d+")

result <- data.frame(
  Partie = partie_numbers,
  Description = descriptions,
  stringsAsFactors = FALSE
)

headers_prep <- result %>%
  unite(pd, Partie, Description, sep = "<br>") %>%
  pull(pd)

# Collect one small data frame per header, then bind them all at the end.
# This keeps every species item attached to its OWN header instead of the
# original rep(length.out = max_length) recycling, which silently
# mis-aligned rows across columns.
sections <- vector("list", length(headers))

for (i in seq_along(headers)) {
  # The <h2> contains a <br>, so its text is split across several text
  # nodes; XPath text() only inspects the FIRST text node, which is why
  # contains(text(), full_header) returned {xml_nodeset (0)}. Match on the
  # element's full string value (.) using the unambiguous "PARTIE n"
  # prefix instead, and take only the FIRST following ProvisionList so we
  # don't also capture the lists of every later part.
  xp <- paste0(
    "//h2[contains(@class, 'SchedHeadL1')]",
    "[contains(., '", partie_numbers[i], "')]",
    "/following-sibling::*[contains(@class, 'ProvisionList')][1]"
  )
  section <- div_content %>% html_nodes(xpath = xp)

  group_title <- section %>% html_nodes(".BilingualGroupTitleText") %>% html_text()
  item_first  <- section %>% html_nodes(".BilingualItemFirst") %>% html_text()
  item_second <- section %>% html_nodes(".BilingualItemSecond") %>% html_text()
  sci_names   <- section %>% html_nodes(".otherLang") %>% html_text()

  # Latin name = the parenthesised part of the first (French) entry
  latin <- str_extract(item_first, "\\(([^)]+)\\)") %>%
    str_replace_all("[()]", "")

  # NOTE(review): group titles are NOT 1:1 with species items (several
  # groups per part, several species per group); recycling them here only
  # approximates the mapping. A fully correct group column needs per-group
  # iteration or a fill()-down approach as in the accepted answer.
  n <- length(item_first)
  sections[[i]] <- data.frame(
    ScheduleLabel  = rep(schedule_label, length.out = n),
    Header         = rep(headers[i], length.out = n),
    GroupTitle     = rep(group_title, length.out = n),
    ItemFirst      = item_first,
    ItemSecond     = rep(item_second, length.out = n),
    ScientificName = rep(sci_names, length.out = n),
    LatinName      = latin,
    stringsAsFactors = FALSE
  )
}

# Final result: one row per species, plain character columns
data <- bind_rows(sections)
r rvest
1个回答
0
投票

不是最干净的代码——但它可以工作。

# Tidyverse answer: scrape the English full text once, pull every part
# header, group title, and species entry as ONE flat vector in document
# order, then reconstruct the section/group hierarchy from that ordering.
library(tidyverse)
library(rvest)

# Parse the page a single time and keep the document handle.
page <- "https://laws.justice.gc.ca/eng/acts/s-15.3/FullText.html" %>% 
  read_html()

page %>% 
  html_element(".Schedule") %>% 
  # Flat, in-order vector of: part headers, group titles, species items.
  html_elements(".SchedHeadL1, .BilingualGroupTitleText, .BilingualItemFirst") %>% 
  map_chr(html_text2) %>% 
  tibble(species = .) %>% 
  # Classify rows: headers contain "PART"; group titles have no
  # parenthesised Latin name, while actual species entries do.
  mutate(section = if_else(str_detect(species, pattern = "PART"), species, NA), 
         group   = if_else(!str_detect(species, pattern = "\\("), species, NA)) %>%  
  # Carry each part header down onto the rows that follow it...
  fill(section) %>%  
  # ...then drop the header rows themselves.
  filter(!str_detect(species, "PART")) %>% 
  # Same trick for group titles: fill down, then keep only species rows
  # (those with a "(...)" Latin name). Order matters: headers must be
  # removed BEFORE filling group, or they would leak into the group column.
  fill(group) %>%  
  filter(str_detect(species, "\\(")) %>% 
  # Strip the "PART n" prefix (and its trailing newline) from the section label.
  mutate(across(section, ~ str_remove_all(.x, "PART \\d+\\n"))) 

# A tibble: 671 × 3
   species                                                                     section            group     
   <chr>                                                                       <chr>              <chr>     
 1 Ferret, Black-footed (Mustela nigripes)                                     Extirpated Species Mammals   
 2 Walrus, Atlantic (Odobenus rosmarus rosmarus) Northwest Atlantic population Extirpated Species Mammals   
 3 Whale, Grey (Eschrichtius robustus) Atlantic population                     Extirpated Species Mammals   
 4 Prairie-Chicken, Greater (Tympanuchus cupido pinnatus)                      Extirpated Species Birds     
 5 Sage-Grouse phaios subspecies, Greater (Centrocercus urophasianus phaios)   Extirpated Species Birds     
 6 Salamander, Eastern Tiger (Ambystoma tigrinum) Carolinian population        Extirpated Species Amphibians
 7 Gophersnake, Pacific (Pituophis catenifer catenifer)                        Extirpated Species Reptiles  
 8 Lizard, Pygmy Short-horned (Phrynosoma douglasii)                           Extirpated Species Reptiles  
 9 Rattlesnake, Timber (Crotalus horridus)                                     Extirpated Species Reptiles  
10 Turtle, Eastern Box (Terrapene carolina)                                    Extirpated Species Reptiles  
# ℹ 661 more rows
# ℹ Use `print(n = ...)` to see more rows
© www.soinside.com 2019 - 2024. All rights reserved.