My Understanding of OpenTSDB: The Service Startup Process
The TSD can be started directly through the main method of the net.opentsdb.tools.TSDMain class in the source package, without passing any arguments.
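For instance, a minimal in-process launcher is sketched below. This is illustrative only: it assumes the OpenTSDB jar and its dependencies are on the classpath, LaunchTsd is a hypothetical wrapper class, and the --port flag (one of the options registered in main below) is optional since unset options fall back to the config file.

// Minimal sketch, not production code: start a TSD by calling
// TSDMain.main directly. Assumes OpenTSDB and its dependencies are on
// the classpath; LaunchTsd is a hypothetical wrapper class.
public class LaunchTsd {
  public static void main(String[] args) throws Exception {
    net.opentsdb.tools.TSDMain.main(new String[] { "--port=4242" });
  }
}

The full startup sequence lives in TSDMain.main itself: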
public static void main(String[] args) throws IOException {
  Logger log = LoggerFactory.getLogger(TSDMain.class);
  log.info("Starting.");
  log.info(BuildData.revisionString());
  log.info(BuildData.buildString());
  try {
    System.in.close();  // Release a FD we don't need.
  } catch (Exception e) {
    log.warn("Failed to close stdin", e);
  }
  final ArgP argp = new ArgP();
  CliOptions.addCommon(argp);
  argp.addOption("--port", "NUM", "TCP port to listen on.");
  ...
  CliOptions.addAutoMetricFlag(argp);
  args = CliOptions.parse(argp, args);
  args = null;  // free().
  // get a config object
  Config config = CliOptions.getConfig(argp);
  // check for the required parameters
  ...
  final ServerSocketChannelFactory factory;
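  // tsd.core.connections.limit caps how many client connections the TSD
  // will keep open at once; the cap is handed to the PipelineFactory below.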
  int connections_limit = 0;
  try {
    connections_limit = config.getInt("tsd.core.connections.limit");
  } catch (NumberFormatException nfe) {
    usage(argp, "Invalid connections limit", 1);
  }
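  // tsd.network.async_io selects Netty's non-blocking NIO transport: one
  // boss thread accepting connections plus a pool of workers doing the I/O.
  // The fallback is the blocking OIO transport, which dedicates a thread to
  // every connection.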
  if (config.getBoolean("tsd.network.async_io")) {
    int workers = Runtime.getRuntime().availableProcessors() * 2;
    if (config.hasProperty("tsd.network.worker_threads")) {
      try {
        workers = config.getInt("tsd.network.worker_threads");
      } catch (NumberFormatException nfe) {
        usage(argp, "Invalid worker thread count", 1);
      }
    }
    final Executor executor = Executors.newCachedThreadPool();
    final NioServerBossPool boss_pool =
        new NioServerBossPool(executor, 1, new Threads.BossThreadNamer());
    final NioWorkerPool worker_pool = new NioWorkerPool(executor,
        workers, new Threads.WorkerThreadNamer());
    factory = new NioServerSocketChannelFactory(boss_pool, worker_pool);
  } else {
    factory = new OioServerSocketChannelFactory(
        Executors.newCachedThreadPool(), Executors.newCachedThreadPool(),
        new Threads.PrependThreadNamer());
  }
...
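  // With the channel factory ready, build the TSDB core, verify that the
  // required HBase tables exist, and only then bind the Netty server.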
  try {
    tsdb = new TSDB(config);
    if (startup != null) {
      tsdb.setStartupPlugin(startup);
    }
    tsdb.initializePlugins(true);
    if (config.getBoolean("tsd.storage.hbase.prefetch_meta")) {
      tsdb.preFetchHBaseMeta();
    }
    // Make sure we don't even start if we can't find our tables.
    tsdb.checkNecessaryTablesExist().joinUninterruptibly();
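    // checkNecessaryTablesExist() is asynchronous and returns a Deferred;
    // joinUninterruptibly() blocks startup until HBase has answered.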
    registerShutdownHook();
    final ServerBootstrap server = new ServerBootstrap(factory);
    // This manager is capable of lazy init, but we force an init
    // here to fail fast.
    final RpcManager manager = RpcManager.instance(tsdb);
    server.setPipelineFactory(new PipelineFactory(tsdb, manager, connections_limit));
    if (config.hasProperty("tsd.network.backlog")) {
      server.setOption("backlog", config.getInt("tsd.network.backlog"));
    }
    server.setOption("child.tcpNoDelay",
        config.getBoolean("tsd.network.tcp_no_delay"));
    server.setOption("child.keepAlive",
        config.getBoolean("tsd.network.keep_alive"));
    server.setOption("reuseAddress",
        config.getBoolean("tsd.network.reuse_address"));
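    // Options prefixed with "child." apply to each accepted connection;
    // the others configure the listening socket itself.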
    // null is interpreted as the wildcard address.
    InetAddress bindAddress = null;
    if (config.hasProperty("tsd.network.bind")) {
      bindAddress = InetAddress.getByName(config.getString("tsd.network.bind"));
    }
    // we validated the network port config earlier
    final InetSocketAddress addr = new InetSocketAddress(bindAddress,
        config.getInt("tsd.network.port"));
    server.bind(addr);
    if (startup != null) {
      startup.setReady(tsdb);
    }
    log.info("Ready to serve on " + addr);
  } catch (Throwable e) {
    factory.releaseExternalResources();
    try {
      if (tsdb != null)
        tsdb.shutdown().joinUninterruptibly();
    } catch (Exception e2) {
      log.error("Failed to shutdown HBase client", e2);
    }
    throw new RuntimeException("Initialization failed", e);
  }
  // The server is now running in separate threads, we can exit main.
}
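Every configuration key read in main above corresponds to an entry in opentsdb.conf. The snippet below is an illustrative sketch covering only the keys this method touches; the values are placeholders, not recommendations:

# Illustrative values; each key below is read somewhere in TSDMain.main.
tsd.network.port = 4242
tsd.network.bind = 0.0.0.0
tsd.network.async_io = true
tsd.network.worker_threads = 8
tsd.network.backlog = 3072
tsd.network.tcp_no_delay = true
tsd.network.keep_alive = true
tsd.network.reuse_address = true
tsd.core.connections.limit = 0
tsd.storage.hbase.prefetch_meta = false

Once server.bind(addr) succeeds, main simply returns: the Netty boss and worker threads keep the TSD running, and the shutdown hook registered earlier takes care of shutting the TSDB down cleanly on exit.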