public long deserialize(DataTree dt, Map<Long, Integer> sessions) throws IOException {
    // we run through 100 snapshots (not all of them)
    // if we cannot get it running within 100 snapshots
    // we should give up
    // take at most 100 valid snapshot files, sorted by zxid in descending order
    List<File> snapList = findNValidSnapshots(100);
    if (snapList.size() == 0) {
        // no snapshot file at all: return -1 as the marker value
        return -1L;
    }
    File snap = null;
    long snapZxid = -1;
    boolean foundValid = false;
    for (int i = 0, snapListSize = snapList.size(); i < snapListSize; i++) {
        // walk the snapshot files that were found and deserialize the newest (largest-zxid) valid one
        snap = snapList.get(i);
        LOG.info("Reading snapshot {}", snap);
        // the zxid is encoded in the file name, e.g. /tmp/zookeeper/data/version-2/snap.8
        snapZxid = Util.getZxidFromName(snap.getName(), SNAPSHOT_FILE_PREFIX);
        try (CheckedInputStream snapIS = SnapStream.getInputStream(snap)) {
            InputArchive ia = BinaryInputArchive.getArchive(snapIS);
            // deserialize the snapshot file into the in-memory DataTree
            deserialize(dt, sessions, ia);
            SnapStream.checkSealIntegrity(snapIS, ia);

            // Digest feature was added after the CRC to make it backward
            // compatible, the older code can still read snapshots which
            // includes digest.
            //
            // To check the intact, after adding digest we added another
            // CRC check.
            if (dt.deserializeZxidDigest(ia, snapZxid)) {
                SnapStream.checkSealIntegrity(snapIS, ia);
            }

            // the newest readable snapshot wins; stop here
            foundValid = true;
            break;
        } catch (IOException e) {
            LOG.warn("problem reading snap file {}", snap, e);
        }
    }
    if (!foundValid) {
        throw new IOException("Not able to find valid snapshots in " + snapDir);
    }
    dt.lastProcessedZxid = snapZxid;
    lastSnapshotInfo = new SnapshotInfo(dt.lastProcessedZxid, snap.lastModified() / 1000);

    // compare the digest if this is not a fuzzy snapshot, we want to compare
    // and find inconsistent asap.
    if (dt.getDigestFromLoadedSnapshot() != null) {
        dt.compareSnapshotDigests(dt.lastProcessedZxid);
    }
    return dt.lastProcessedZxid;
}
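The snapshot file name itself carries the last zxid: Util.getZxidFromName strips the "snap." prefix and parses the remaining suffix as a hexadecimal number, so snap.8 corresponds to zxid 0x8. Below is a minimal, self-contained sketch of that naming convention; parseSnapZxid is a hypothetical helper written only for illustration, not ZooKeeper's own Util.getZxidFromName.

// Illustrative sketch of the snapshot naming convention assumed above:
// a file such as /tmp/zookeeper/data/version-2/snap.8 carries its zxid
// as a hex suffix after the "snap." prefix.
public final class SnapNameDemo {

    // hypothetical helper mimicking what Util.getZxidFromName does for snapshots
    static long parseSnapZxid(String fileName) {
        String prefix = "snap.";
        if (!fileName.startsWith(prefix)) {
            return -1L; // not a snapshot file
        }
        // the part after "snap." is the zxid in hexadecimal
        return Long.parseLong(fileName.substring(prefix.length()), 16);
    }

    public static void main(String[] args) {
        System.out.println(parseSnapZxid("snap.8"));   // 8
        System.out.println(parseSnapZxid("snap.16e")); // 366
        System.out.println(parseSnapZxid("log.100"));  // -1, not a snapshot
    }
}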
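For context, here is a rough sketch of how this deserialize call might be driven when a server restores its in-memory state at startup. The SnapshotRestoreDemo class and the /tmp/zookeeper/data/version-2 path are assumptions for illustration; in ZooKeeper itself this step is performed by FileTxnSnapLog.restore before the transaction log is replayed.

import java.io.File;
import java.io.IOException;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

import org.apache.zookeeper.server.DataTree;
import org.apache.zookeeper.server.persistence.FileSnap;

public class SnapshotRestoreDemo {

    public static void main(String[] args) throws IOException {
        // snapshot directory layout assumed from the path comment above
        File snapDir = new File("/tmp/zookeeper/data/version-2");

        DataTree dataTree = new DataTree();
        Map<Long, Integer> sessions = new ConcurrentHashMap<>();

        // deserialize picks the newest readable snapshot (among up to 100 candidates)
        // and returns the zxid recovered from its file name, or -1 if no snapshot exists
        FileSnap fileSnap = new FileSnap(snapDir);
        long restoredZxid = fileSnap.deserialize(dataTree, sessions);

        if (restoredZxid == -1L) {
            System.out.println("no snapshot found, starting from an empty DataTree");
        } else {
            System.out.println("restored up to zxid 0x" + Long.toHexString(restoredZxid));
        }
    }
}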