This article covers Spark SQLContext and HBase: how to resolve java.lang.NoClassDefFoundError: org/apache/hadoop/hbase/util/Bytes when querying an HBase-backed Hive table from Spark.

Problem description




I am trying to access, from Spark, a Hive table that is created on top of an HBase table.

sqlContext.sql("select * from dev.hive_habse_table")

Even though I have included all the required jars, like this:
export HADOOP_CLASSPATH=$HADOOP_CLASSPATH:$(hbase classpath)
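Exporting HADOOP_CLASSPATH in the shell may not be enough on its own, since Spark's Hive client runs inside an isolated classloader (note the IsolatedClientLoader frames in the trace below). As a quick sanity check, a minimal sketch assuming the hbase command is on your PATH:

    # Print each classpath entry on its own line and look for hbase-common,
    # the jar that contains org.apache.hadoop.hbase.util.Bytes
    hbase classpath | tr ':' '\n' | grep hbase-common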

and launched the Spark shell with the Hive HBase handler on HDP 2.5 (Spark 1.6.3):

  spark-shell --master yarn-client --conf spark.yarn.queue=uk --executor-cores 10 --executor-memory 20G --num-executors 15 --driver-memory 2G --jars hive-hbase-handler-1.2.1000.2.4.0.1-6.jar

I am still getting the error below. Could anyone help?

java.lang.NoClassDefFoundError: org/apache/hadoop/hbase/util/Bytes
    at org.apache.hadoop.hive.hbase.HBaseSerDe.parseColumnsMapping(HBaseSerDe.java:184)
    at org.apache.hadoop.hive.hbase.HBaseSerDeParameters.<init>(HBaseSerDeParameters.java:73)
    at org.apache.hadoop.hive.hbase.HBaseSerDe.initialize(HBaseSerDe.java:117)
    at org.apache.hadoop.hive.serde2.AbstractSerDe.initialize(AbstractSerDe.java:53)
    at org.apache.hadoop.hive.serde2.SerDeUtils.initializeSerDe(SerDeUtils.java:521)
    at org.apache.hadoop.hive.metastore.MetaStoreUtils.getDeserializer(MetaStoreUtils.java:391)
    at org.apache.hadoop.hive.ql.metadata.Table.getDeserializerFromMetaStore(Table.java:276)
    at org.apache.hadoop.hive.ql.metadata.Table.getDeserializer(Table.java:258)
    at org.apache.hadoop.hive.ql.metadata.Table.getCols(Table.java:605)
    at org.apache.spark.sql.hive.client.ClientWrapper$$anonfun$getTableOption$1$$anonfun$3.apply(ClientWrapper.scala:347)
    at org.apache.spark.sql.hive.client.ClientWrapper$$anonfun$getTableOption$1$$anonfun$3.apply(ClientWrapper.scala:342)
    at scala.Option.map(Option.scala:145)
    at org.apache.spark.sql.hive.client.ClientWrapper$$anonfun$getTableOption$1.apply(ClientWrapper.scala:342)
    at org.apache.spark.sql.hive.client.ClientWrapper$$anonfun$getTableOption$1.apply(ClientWrapper.scala:337)
    at org.apache.spark.sql.hive.client.ClientWrapper$$anonfun$withHiveState$1.apply(ClientWrapper.scala:295)
    at org.apache.spark.sql.hive.client.ClientWrapper.liftedTree1$1(ClientWrapper.scala:242)
    at org.apache.spark.sql.hive.client.ClientWrapper.retryLocked(ClientWrapper.scala:241)
    at org.apache.spark.sql.hive.client.ClientWrapper.withHiveState(ClientWrapper.scala:284)
    at org.apache.spark.sql.hive.client.ClientWrapper.getTableOption(ClientWrapper.scala:337)
    at org.apache.spark.sql.hive.client.ClientInterface$class.getTable(ClientInterface.scala:122)
    at org.apache.spark.sql.hive.client.ClientWrapper.getTable(ClientWrapper.scala:61)
    at org.apache.spark.sql.hive.HiveMetastoreCatalog.lookupRelation(HiveMetastoreCatalog.scala:414)
    at org.apache.spark.sql.hive.HiveContext$$anon$2.org$apache$spark$sql$catalyst$analysis$OverrideCatalog$$super$lookupRelation(HiveContext.scala:475)
    at org.apache.spark.sql.catalyst.analysis.OverrideCatalog$class.lookupRelation(Catalog.scala:162)
    at org.apache.spark.sql.hive.HiveContext$$anon$2.lookupRelation(HiveContext.scala:475)
    at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveRelations$.getTable(Analyzer.scala:302)
    at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveRelations$$anonfun$apply$9.applyOrElse(Analyzer.scala:314)
    at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveRelations$$anonfun$apply$9.applyOrElse(Analyzer.scala:309)
    at org.apache.spark.sql.catalyst.plans.logical.LogicalPlan$$anonfun$resolveOperators$1.apply(LogicalPlan.scala:57)
    at org.apache.spark.sql.catalyst.plans.logical.LogicalPlan$$anonfun$resolveOperators$1.apply(LogicalPlan.scala:57)
    at org.apache.spark.sql.catalyst.trees.CurrentOrigin$.withOrigin(TreeNode.scala:69)
    at org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.resolveOperators(LogicalPlan.scala:56)
    at org.apache.spark.sql.catalyst.plans.logical.LogicalPlan$$anonfun$1.apply(LogicalPlan.scala:54)
    at org.apache.spark.sql.catalyst.plans.logical.LogicalPlan$$anonfun$1.apply(LogicalPlan.scala:54)
    at org.apache.spark.sql.catalyst.trees.TreeNode$$anonfun$4.apply(TreeNode.scala:281)
    at scala.collection.Iterator$$anon$11.next(Iterator.scala:328)
    at scala.collection.Iterator$class.foreach(Iterator.scala:727)
    at scala.collection.AbstractIterator.foreach(Iterator.scala:1157)
    at scala.collection.generic.Growable$class.$plus$plus$eq(Growable.scala:48)
    at scala.collection.mutable.ArrayBuffer.$plus$plus$eq(ArrayBuffer.scala:103)
    at scala.collection.mutable.ArrayBuffer.$plus$plus$eq(ArrayBuffer.scala:47)
    at scala.collection.TraversableOnce$class.to(TraversableOnce.scala:273)
    at scala.collection.AbstractIterator.to(Iterator.scala:1157)
    at scala.collection.TraversableOnce$class.toBuffer(TraversableOnce.scala:265)
    at scala.collection.AbstractIterator.toBuffer(Iterator.scala:1157)
    at scala.collection.TraversableOnce$class.toArray(TraversableOnce.scala:252)
    at scala.collection.AbstractIterator.toArray(Iterator.scala:1157)
    at org.apache.spark.sql.catalyst.trees.TreeNode.transformChildren(TreeNode.scala:321)
    at org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.resolveOperators(LogicalPlan.scala:54)
    at org.apache.spark.sql.catalyst.plans.logical.LogicalPlan$$anonfun$1.apply(LogicalPlan.scala:54)
    at org.apache.spark.sql.catalyst.plans.logical.LogicalPlan$$anonfun$1.apply(LogicalPlan.scala:54)
    at org.apache.spark.sql.catalyst.trees.TreeNode$$anonfun$4.apply(TreeNode.scala:281)
    at scala.collection.Iterator$$anon$11.next(Iterator.scala:328)
    at scala.collection.Iterator$class.foreach(Iterator.scala:727)
    at scala.collection.AbstractIterator.foreach(Iterator.scala:1157)
    at scala.collection.generic.Growable$class.$plus$plus$eq(Growable.scala:48)
    at scala.collection.mutable.ArrayBuffer.$plus$plus$eq(ArrayBuffer.scala:103)
    at scala.collection.mutable.ArrayBuffer.$plus$plus$eq(ArrayBuffer.scala:47)
    at scala.collection.TraversableOnce$class.to(TraversableOnce.scala:273)
    at scala.collection.AbstractIterator.to(Iterator.scala:1157)
    at scala.collection.TraversableOnce$class.toBuffer(TraversableOnce.scala:265)
    at scala.collection.AbstractIterator.toBuffer(Iterator.scala:1157)
    at scala.collection.TraversableOnce$class.toArray(TraversableOnce.scala:252)
    at scala.collection.AbstractIterator.toArray(Iterator.scala:1157)
    at org.apache.spark.sql.catalyst.trees.TreeNode.transformChildren(TreeNode.scala:321)
    at org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.resolveOperators(LogicalPlan.scala:54)
    at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveRelations$.apply(Analyzer.scala:309)
    at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveRelations$.apply(Analyzer.scala:299)
    at org.apache.spark.sql.catalyst.rules.RuleExecutor$$anonfun$execute$1$$anonfun$apply$1.apply(RuleExecutor.scala:83)
    at org.apache.spark.sql.catalyst.rules.RuleExecutor$$anonfun$execute$1$$anonfun$apply$1.apply(RuleExecutor.scala:80)
    at scala.collection.LinearSeqOptimized$class.foldLeft(LinearSeqOptimized.scala:111)
    at scala.collection.immutable.List.foldLeft(List.scala:84)
    at org.apache.spark.sql.catalyst.rules.RuleExecutor$$anonfun$execute$1.apply(RuleExecutor.scala:80)
    at org.apache.spark.sql.catalyst.rules.RuleExecutor$$anonfun$execute$1.apply(RuleExecutor.scala:72)
    at scala.collection.immutable.List.foreach(List.scala:318)
    at org.apache.spark.sql.catalyst.rules.RuleExecutor.execute(RuleExecutor.scala:72)
    at org.apache.spark.sql.execution.QueryExecution.analyzed$lzycompute(QueryExecution.scala:36)
    at org.apache.spark.sql.execution.QueryExecution.analyzed(QueryExecution.scala:36)
    at org.apache.spark.sql.execution.QueryExecution.assertAnalyzed(QueryExecution.scala:34)
    at org.apache.spark.sql.DataFrame.<init>(DataFrame.scala:133)
    at org.apache.spark.sql.DataFrame$.apply(DataFrame.scala:52)
    at org.apache.spark.sql.SQLContext.sql(SQLContext.scala:817)
    at $iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC.<init>(<console>:25)
    at $iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC.<init>(<console>:31)
    at $iwC$$iwC$$iwC$$iwC$$iwC$$iwC.<init>(<console>:33)
    at $iwC$$iwC$$iwC$$iwC$$iwC.<init>(<console>:35)
    at $iwC$$iwC$$iwC$$iwC.<init>(<console>:37)
    at $iwC$$iwC$$iwC.<init>(<console>:39)
    at $iwC$$iwC.<init>(<console>:41)
    at $iwC.<init>(<console>:43)
    at <init>(<console>:45)
    at .<init>(<console>:49)
    at .<clinit>(<console>)
    at .<init>(<console>:7)
    at .<clinit>(<console>)
    at $print(<console>)
    at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
    at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
    at java.lang.reflect.Method.invoke(Method.java:497)
    at org.apache.spark.repl.SparkIMain$ReadEvalPrint.call(SparkIMain.scala:1065)
    at org.apache.spark.repl.SparkIMain$Request.loadAndRun(SparkIMain.scala:1346)
    at org.apache.spark.repl.SparkIMain.loadAndRunReq$1(SparkIMain.scala:840)
    at org.apache.spark.repl.SparkIMain.interpret(SparkIMain.scala:871)
    at org.apache.spark.repl.SparkIMain.interpret(SparkIMain.scala:819)
    at org.apache.spark.repl.SparkILoop.reallyInterpret$1(SparkILoop.scala:857)
    at org.apache.spark.repl.SparkILoop.interpretStartingWith(SparkILoop.scala:902)
    at org.apache.spark.repl.SparkILoop.reallyInterpret$1(SparkILoop.scala:875)
    at org.apache.spark.repl.SparkILoop.interpretStartingWith(SparkILoop.scala:902)
    at org.apache.spark.repl.SparkILoop.command(SparkILoop.scala:814)
    at org.apache.spark.repl.SparkILoop.processLine$1(SparkILoop.scala:657)
    at org.apache.spark.repl.SparkILoop.innerLoop$1(SparkILoop.scala:665)
    at org.apache.spark.repl.SparkILoop.org$apache$spark$repl$SparkILoop$$loop(SparkILoop.scala:670)
    at org.apache.spark.repl.SparkILoop$$anonfun$org$apache$spark$repl$SparkILoop$$process$1.apply$mcZ$sp(SparkILoop.scala:997)
    at org.apache.spark.repl.SparkILoop$$anonfun$org$apache$spark$repl$SparkILoop$$process$1.apply(SparkILoop.scala:945)
    at org.apache.spark.repl.SparkILoop$$anonfun$org$apache$spark$repl$SparkILoop$$process$1.apply(SparkILoop.scala:945)
    at scala.tools.nsc.util.ScalaClassLoader$.savingContextLoader(ScalaClassLoader.scala:135)
    at org.apache.spark.repl.SparkILoop.org$apache$spark$repl$SparkILoop$$process(SparkILoop.scala:945)
    at org.apache.spark.repl.SparkILoop.process(SparkILoop.scala:1059)
    at org.apache.spark.repl.Main$.main(Main.scala:31)
    at org.apache.spark.repl.Main.main(Main.scala)
    at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
    at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
    at java.lang.reflect.Method.invoke(Method.java:497)
    at org.apache.spark.deploy.SparkSubmit$.org$apache$spark$deploy$SparkSubmit$$runMain(SparkSubmit.scala:738)
    at org.apache.spark.deploy.SparkSubmit$.doRunMain$1(SparkSubmit.scala:181)
    at org.apache.spark.deploy.SparkSubmit$.submit(SparkSubmit.scala:206)
    at org.apache.spark.deploy.SparkSubmit$.main(SparkSubmit.scala:121)
    at org.apache.spark.deploy.SparkSubmit.main(SparkSubmit.scala)
Caused by: java.lang.ClassNotFoundException: org.apache.hadoop.hbase.util.Bytes
    at scala.tools.nsc.interpreter.AbstractFileClassLoader.findClass(AbstractFileClassLoader.scala:83)
    at java.lang.ClassLoader.loadClass(ClassLoader.java:424)
    at java.lang.ClassLoader.loadClass(ClassLoader.java:357)
    at org.apache.spark.sql.hive.client.IsolatedClientLoader$$anon$1.doLoadClass(IsolatedClientLoader.scala:216)
    at org.apache.spark.sql.hive.client.IsolatedClientLoader$$anon$1.loadClass(IsolatedClientLoader.scala:201)
    at java.lang.ClassLoader.loadClass(ClassLoader.java:357)
    ... 130 more
Solution

Could you please check whether either of these two options works?

1. Try setting $HBASE_HOME/lib/* in --jars.
2. Try setting the hbase classpath in the hadoop-env.sh file.

A sketch of both options follows.
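For example (a minimal sketch; HBASE_HOME is assumed to point at the HBase installation, jar names vary by HDP version, and --jars expects a comma-separated list):

    # Option 1: ship the HBase client jars to the driver and executors via --jars.
    # The glob expands space-separated, so convert it to the comma-separated form.
    HBASE_JARS=$(echo "$HBASE_HOME"/lib/hbase-*.jar | tr ' ' ',')
    spark-shell --master yarn-client --conf spark.yarn.queue=uk \
      --jars hive-hbase-handler-1.2.1000.2.4.0.1-6.jar,"$HBASE_JARS"

    # Option 2: add this line to hadoop-env.sh so every Hadoop-launched JVM
    # sees the HBase jars:
    #   export HADOOP_CLASSPATH=$HADOOP_CLASSPATH:$(hbase classpath)

On Spark 1.6 you can also point spark.driver.extraClassPath and spark.executor.extraClassPath at $HBASE_HOME/lib/* instead of, or in addition to, --jars.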

Thanks
