Spark Scala code performance tuning: memory overhead error

Problem description  Votes: -1  Answers: 1

Hi, I have written the code below in Scala Spark. When I run it against 6.4 million records, I get this error:

Exception in thread "main" java.lang.OutOfMemoryError: GC overhead limit exceeded

I also get warnings like: WARN WindowExec: No Partition Defined for Window operation! Moving all data to a single partition, this can cause serious performance degradation.

Can anyone help me with this? It would really be very helpful. I have already tried increasing the executor memory and so on, but the result is still the same.

    import scala.collection.mutable.ListBuffer
    import scala.io.Source

    import org.apache.log4j.{Level, Logger}
    import org.apache.spark.{SparkConf, SparkContext}
    import org.apache.spark.sql.{DataFrame, SparkSession}
    import org.apache.spark.sql.functions.{col, sum, udf, when}

    trait init5 {
      var metaData: String = null
      var ColumnsList = new ListBuffer[String]()
      var fileList = new ListBuffer[String]()
      var dateList = new ListBuffer[String]()
      var fileL = new ListBuffer[String]()
      var ymlFilePath: String = ""
      var csvFilePath: String = ""
      var delimiter: String = ""
      var count_range_column: String = ""
      var usage_column: String = ""
      var date_column: String = ""
      var millioncount_column: String = ""
      var zero_morethanzero_column: String = ""
      var serviceOrders_column: String = ""
      var empty_null_column: String = ""
      var simple_columns: String = ""
    }

    object yml2 extends init5 {

      def main(args: Array[String]) {

        val sparkConf = new SparkConf()
          .setMaster("local[*]")
          .setAppName("hbase sql")
          .set("SPARK_EXECUTOR_MEMORY", "6g")
          .set("spark.network.timeout", "900s")
          .set("spark.task.max.failure", "24")
          .set("spark.memory.offheap.size", "31113699737")
        val sc = new SparkContext(sparkConf)
        val spark1 = SparkSession.builder().config(sc.getConf).getOrCreate()
        val sqlContext = spark1.sqlContext

        val propertypath: String = "C:/Users/ayushgup/Desktop/properties.yml"

        getProperty(propertypath)

        import spark1.implicits._

        def f1(number: Double) = {
          "%.2f".format(number).toDouble
        }
        val udfFunc = udf(f1 _)
        def getCountPercent(df: DataFrame): DataFrame = {
          // The empty over() below is what triggers the WindowExec warning:
          // with no partitionBy, every row is moved into a single partition
          // in order to compute the global sum.
          df.withColumn("SUM", sum("count").over())
            .withColumn("fraction", col("count") / sum("count").over())
            .withColumn("Percent", col("fraction") * 100)
            .withColumn("number", udfFunc(col("Percent")))
            .drop("Percent")
            .drop("fraction")
        }

        def occurenceCount(df: DataFrame, column: String) {

          var usageFinalDF = df.groupBy(column).count.transform(getCountPercent)

          // collect() brings the grouped result back to the driver; the rows
          // are then flattened into delimited strings for the final report.
          for (value <- usageFinalDF.collect()) {
            fileList += column + "~" + value.mkString("~")
          }
        }


        Logger.getLogger("org").setLevel(Level.WARN)

        val headerCSV = spark1.sqlContext.read.format("CSV")
          .option("header", "true")
          .option("delimiter", """|""")
          .load("C:\\Users\\ayushgup\\Downloads\\Header3.csv")

        val columns = headerCSV.columns

        val data = spark1.sqlContext.read.format("CSV")
          .option("delimiter", """~""")
          .load("C:\\Users\\ayushgup\\Desktop\\GDF_postpaid_customer_PROD\\postpaid_customers_20190131_3474270.csv")
          .toDF(columns: _*)


        for (coll <- columns.toList) {

          if (date_column.toLowerCase().trim().split(",").contains(coll.toLowerCase())) {

            // Pulls every value of the date column back to the driver and keeps
            // characters 1 to 7 of its string form; with millions of rows this
            // materialises the whole column in driver memory.
            for (datesss <- data.select(coll).collect()) {
              dateList += datesss.toString().slice(1, 8)
            }

            var dateFinalDF = dateList.toList.toDF(coll)

            occurenceCount(dateFinalDF, coll)

          } else if (count_range_column.toLowerCase().trim().split(",").contains(coll.toLowerCase())) {

            var r = data.select(coll).withColumn(coll, when(col(coll) <= 1500, "1-1500")
              .when(col(coll) > 1500 && col(coll) < 1700, "1500-1700")
              .when(col(coll) > 1700 && col(coll) < 1900, "1700-1900")
              .when(col(coll) > 1900 && col(coll) < 2000, "1900-2000")
              .when(col(coll) > 2000, ">2000")
              .otherwise(0))
              .toDF(coll)

            occurenceCount(r, coll)

          } else if (usage_column.toLowerCase().trim().split(",").contains(coll.toLowerCase())) {

            var r = data.select(coll).withColumn(coll, when(col(coll) <= 1026, "<=1gb")
              .when(col(coll) > 1026 && col(coll) < 5130, "1-5gb")
              .when(col(coll) > 5130 && col(coll) < 10260, "5-10gb")
              .when(col(coll) > 10260 && col(coll) < 20520, "10-20gb")
              .when(col(coll) > 20520, ">20gb")
              .otherwise(0)).toDF(coll)

            occurenceCount(r, coll)

          } else if (millioncount_column.toLowerCase().trim().split(",").contains(coll.toLowerCase())) {

            var r = data.select(coll).withColumn(coll, when(col(coll) > 1000000 && col(coll) < 5000000, "1-5m")
              .when(col(coll) > 5000000 && col(coll) < 11000000, "5-11m")
              .when(col(coll) > 12000000 && col(coll) < 23000000, "12-23m")
              .when(col(coll) > 24000000 && col(coll) < 35000000, "24-35m")
              .when(col(coll) > 36000000, ">36m")
              .otherwise(0)).toDF(coll)

            occurenceCount(r, coll)

          } else if (zero_morethanzero_column.toLowerCase().trim().split(",").contains(coll.toLowerCase())) {

            var r = data.select(coll).withColumn(coll, when(col(coll) === 0, "0")
              .when(col(coll) > 0, ">0")
              .otherwise(1)).toDF(coll)

            occurenceCount(r, coll)

          } else if (serviceOrders_column.toLowerCase().trim().split(",").contains(coll.toLowerCase())) {

            var r = data.select(coll).withColumn(coll, when(col(coll) === 0, "0")
              .when(col(coll) === 1, "1")
              .when(col(coll) === 2, "2")
              .when(col(coll) === 3, "3")
              .when(col(coll) > 3, ">3"))
              .toDF(coll)

            occurenceCount(r, coll)

          } else if (empty_null_column.toLowerCase().trim().split(",").contains(coll.toLowerCase())) {

            var condition = data.select(coll).withColumn(coll, when(col(coll) === 0, "empty, n/a").otherwise(1)).toDF(coll)

            occurenceCount(condition, coll)

          } else if (simple_columns.toLowerCase().trim().split(",").contains(coll.toLowerCase())) {

            val actData1 = data.select(coll).toDF(coll)

            occurenceCount(actData1, coll)
          }
        }


        var f = fileList.toList
        for (flist <- f) {
          fileL += flist.replaceAll("[\\[\\]]", "")
        }

        // Creating the final list into a DataFrame
        Logger.getLogger("org").info("Creating the Final List into a DataFrame")
        var ff = fileL.toDF().selectExpr(
          "split(value, '~')[0] as Attribute_Name",
          "split(value, '~')[1] as Attribute_Value",
          "split(value, '~')[2] as Attribute_Count",
          "split(value, '~')[3] as Attribute_Total_Count",
          "split(value, '~')[4] as Attribute_Percentage")

        ff.show(50000)

        spark1.stop()
      }



      // Get metadata from the application.yml file
      def getProperty(propertyFilePath: String) {
        val source = Source.fromFile(propertyFilePath)

        for (line <- source.getLines()) {
          if (line.contains("app_schema_path")) {
            ymlFilePath = line.split(":").last.mkString.trim()
          } else if (line.contains("data_path")) {
            csvFilePath = line.split(":").last.mkString.trim()
          } else if (line.contains("delimiter")) {
            delimiter = line.split(":").last.mkString.trim()
          } else if (line.contains("count_range_column")) {
            count_range_column = line.split(":").last.mkString.trim()
          } else if (line.contains("usage_column")) {
            usage_column = line.split(":").last.mkString.trim()
          } else if (line.contains("date_column")) {
            date_column = line.split(":").last.mkString.trim()
          } else if (line.contains("millioncount_column")) {
            millioncount_column = line.split(":").last.mkString.trim()
          } else if (line.contains("zero_morethanzero_column")) {
            zero_morethanzero_column = line.split(":").last.mkString.trim()
          } else if (line.contains("serviceOrders_column")) {
            serviceOrders_column = line.split(":").last.mkString.trim()
          } else if (line.contains("empty_null_column")) {
            empty_null_column = line.split(":").last.mkString.trim()
          } else if (line.contains("simple_columns")) {
            simple_columns = line.split(":").last.mkString.trim()
          }
        }

        source.close()
      }
    }
scala apache-spark hadoop bigdata
1 Answer
Votes: -1

Please try the following changes.

spark.yarn.executor.memoryOverhead = 10000 (the value is in MB, so roughly 10 GB)

(or more) and

spark.memory.offHeap.enabled = true

You have already configured off-heap memory but never enabled it, and raising the memory overhead by a few GB will give your execution more memory to work with.
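
As a rough sketch (not the exact code from this answer), these settings could be applied on the SparkConf that is already built in the question; spark.memory.offHeap.enabled and spark.memory.offHeap.size are standard Spark keys, while the values shown here are placeholders to tune for your machine:

    // Minimal sketch: enable off-heap memory and raise the overhead.
    val tunedConf = new SparkConf()
      .setMaster("local[*]")
      .setAppName("hbase sql")
      .set("spark.memory.offHeap.enabled", "true")        // spark.memory.offHeap.size is ignored without this
      .set("spark.memory.offHeap.size", "4g")             // placeholder; the question asks for ~29 GB, likely too much for a laptop
      .set("spark.yarn.executor.memoryOverhead", "10000") // in MB; only takes effect when running on YARN
    // Note: in local[*] mode the driver JVM does all the work, so its heap has
    // to be raised before the JVM starts (e.g. spark-submit --driver-memory 6g);
    // executor memory settings have no effect in local mode.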

The above should address the memory problem.
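
Separately, the WindowExec warning quoted in the question comes from the empty over() inside getCountPercent. Since that method only ever runs on a small groupBy(...).count() result, one possible rework (a sketch under that assumption, not a tested drop-in replacement) is to compute the grand total once with an aggregation and join it back, which avoids shuffling everything into a single partition:

    // Sketch: same output columns (count, SUM, number) without an unpartitioned window.
    // Assumes: import org.apache.spark.sql.functions.{col, round, sum}
    def getCountPercentNoWindow(df: DataFrame): DataFrame = {
      val total = df.agg(sum("count").as("SUM"))  // one-row DataFrame holding the grand total
      df.crossJoin(total)
        .withColumn("number", round(col("count") / col("SUM") * 100, 2))
    }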

If the problem still persists, it would be helpful if you could provide more logs for your case.
