After starting on Flink 1.11 the job reports this error and then dies on its own; there are no other errors, and nothing points to my code:

org.apache.hadoop.yarn.exceptions.YarnException: Container container_1590424616102_807478_01_000002 is not handled by this NodeManager
        at sun.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:1.8.0_191]
        at sun.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:62) ~[?:1.8.0_191]
        at sun.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:1.8.0_191]
        at java.lang.reflect.Constructor.newInstance(Constructor.java:423) ~[?:1.8.0_191]
        at org.apache.hadoop.yarn.api.records.impl.pb.SerializedExceptionPBImpl.instantiateException(SerializedExceptionPBImpl.java:168) ~[release-sql-flink-1.11-v2.2.1.jar:?]
        at org.apache.hadoop.yarn.api.records.impl.pb.SerializedExceptionPBImpl.deSerialize(SerializedExceptionPBImpl.java:106) ~[release-sql-flink-1.11-v2.2.1.jar:?]
        at org.apache.hadoop.yarn.client.api.impl.NMClientImpl.stopContainerInternal(NMClientImpl.java:297) ~[release-sql-flink-1.11-v2.2.1.jar:?]
        at org.apache.hadoop.yarn.client.api.impl.NMClientImpl.stopContainer(NMClientImpl.java:247) ~[release-sql-flink-1.11-v2.2.1.jar:?]
        at org.apache.hadoop.yarn.client.api.async.impl.NMClientAsyncImpl$StatefulContainer$StopContainerTransition.transition(NMClientAsyncImpl.java:422) [release-sql-flink-1.11-v2.2.1.jar:?]
        at org.apache.hadoop.yarn.client.api.async.impl.NMClientAsyncImpl$StatefulContainer$StopContainerTransition.transition(NMClientAsyncImpl.java:413) [release-sql-flink-1.11-v2.2.1.jar:?]
        at org.apache.hadoop.yarn.state.StateMachineFactory$MultipleInternalArc.doTransition(StateMachineFactory.java:385) [release-sql-flink-1.11-v2.2.1.jar:?]
        at org.apache.hadoop.yarn.state.StateMachineFactory.doTransition(StateMachineFactory.java:302) [release-sql-flink-1.11-v2.2.1.jar:?]
        at org.apache.hadoop.yarn.state.StateMachineFactory.access$300(StateMachineFactory.java:46) [release-sql-flink-1.11-v2.2.1.jar:?]
        at org.apache.hadoop.yarn.state.StateMachineFactory$InternalStateMachine.doTransition(StateMachineFactory.java:448) [release-sql-flink-1.11-v2.2.1.jar:?]
        at org.apache.hadoop.yarn.client.api.async.impl.NMClientAsyncImpl$StatefulContainer.handle(NMClientAsyncImpl.java:498) [release-sql-flink-1.11-v2.2.1.jar:?]
        at org.apache.hadoop.yarn.client.api.async.impl.NMClientAsyncImpl$ContainerEventProcessor.run(NMClientAsyncImpl.java:557) [release-sql-flink-1.11-v2.2.1.jar:?]
        at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149) [?:1.8.0_191]
        at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624) [?:1.8.0_191]
        at java.lang.Thread.run(Thread.java:748) [?:1.8.0_191]
This looks like an error coming from the Yarn NM rather than from Flink itself. Does it happen on every start, or was it a one-off?
If it was a one-off, the error most likely means the Yarn NM happened to restart right when Flink was calling stopContainer on that container.

Best,
Yang

酷酷的浑蛋 <[hidden email]> wrote on Thu, Aug 20, 2020 at 10:59 AM:
> After starting on Flink 1.11 the job reports this error and then dies on its own; there are no other errors, and nothing points to my code:
> org.apache.hadoop.yarn.exceptions.YarnException: Container container_1590424616102_807478_01_000002 is not handled by this NodeManager
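If NM restarts turn out to be routine on that node, one mitigation worth checking is work-preserving NodeManager restart, so running containers survive an NM restart instead of becoming unknown to it. The following is only a minimal yarn-site.xml sketch under the assumption that you administer the cluster; the recovery directory and port shown are example values, not settings taken from this thread.

  <!-- yarn-site.xml: enable work-preserving NodeManager restart -->
  <property>
    <name>yarn.nodemanager.recovery.enabled</name>
    <value>true</value>
  </property>
  <property>
    <!-- local directory where the NM persists container state across restarts (example path) -->
    <name>yarn.nodemanager.recovery.dir</name>
    <value>/var/hadoop/yarn/nm-recovery</value>
  </property>
  <property>
    <!-- recovery requires the NM RPC port to be fixed rather than ephemeral (example port) -->
    <name>yarn.nodemanager.address</name>
    <value>0.0.0.0:45454</value>
  </property>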