@@ -0,0 +1,1332 @@
+From 35f86d6a0a03295e4da9dff23eddfe4032350db3 Mon Sep 17 00:00:00 2001
+From: Roger McFarlane <[email protected]>
+Date: Tue, 17 Dec 2024 12:20:05 -0800
+Subject: [PATCH] Remove PersistentMemoryAllocator::GetAllocSize()
+
+This CL removes PersistentMemoryAllocator::GetAllocSize() in favor
+of allowing various other API entry points to return the alloc size.
+This mitigates potential TOCTOU errors where the size of an alloc
+is validated by one API then separately fetched in another call. The
+size could otherwise be manipulated in between initial validation and
+the subsequent fetch.
+
+(cherry picked from commit 23479ae0d3332f5525cfd9491137fc6c0ffcb46a)
+
+Bug: 378623799
+Change-Id: I8021cf4c07f1a96172deb2a252326e9ffa525798
+Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/6025612
+Reviewed-by: Alexei Svitkine <[email protected]>
+Commit-Queue: Roger McFarlane <[email protected]>
+Cr-Original-Commit-Position: refs/heads/main@{#1394492}
+Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/6098919
+Auto-Submit: Roger McFarlane <[email protected]>
+Commit-Queue: Luc Nguyen <[email protected]>
+Reviewed-by: Luc Nguyen <[email protected]>
+Cr-Commit-Position: refs/branch-heads/6834@{#2335}
+Cr-Branched-From: 47a3549fac11ee8cb7be6606001ede605b302b9f-refs/heads/main@{#1381561}
+---
+
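+Editor's note (not part of the original commit message): the sketch below is
+only an illustration of the API shape before and after this change. The type
+`MyEntry` and the functions `ValidateOld()` / `ValidateNew()` are hypothetical
+names invented for the example; they do not appear anywhere in this patch.
+
+  #include <cstdint>
+
+  #include "base/metrics/persistent_memory_allocator.h"
+
+  // Hypothetical persistent object: standard layout, fixed 16-byte size.
+  struct MyEntry {
+    static constexpr uint32_t kPersistentTypeId = 0x4D454E54;
+    static constexpr size_t kExpectedInstanceSize = 16;
+    uint64_t id;
+    uint64_t extra_bytes;  // Length of a trailing variable-length payload.
+  };
+
+  // Before this change: the size comes from a second call, so the block
+  // header could be modified between the validated GetAsObject() dereference
+  // and the GetAllocSize() read (the TOCTOU window described above).
+  bool ValidateOld(base::PersistentMemoryAllocator* allocator,
+                   base::PersistentMemoryAllocator::Reference ref) {
+    const MyEntry* entry = allocator->GetAsObject<MyEntry>(ref);
+    size_t size = allocator->GetAllocSize(ref);  // Separate header read.
+    return entry && size >= sizeof(MyEntry) + entry->extra_bytes;
+  }
+
+  // After this change: the size of the block that was actually validated is
+  // returned through an out-parameter of the same GetAsObject() call.
+  bool ValidateNew(base::PersistentMemoryAllocator* allocator,
+                   base::PersistentMemoryAllocator::Reference ref) {
+    size_t size = 0;
+    const MyEntry* entry = allocator->GetAsObject<MyEntry>(ref, &size);
+    return entry && size >= sizeof(MyEntry) + entry->extra_bytes;
+  }
+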
+diff --git a/base/metrics/field_trial.cc b/base/metrics/field_trial.cc
|
|
|
+index 829a5c4..bf2ffe6 100644
|
|
|
+--- a/base/metrics/field_trial.cc
|
|
|
++++ b/base/metrics/field_trial.cc
|
|
|
+@@ -124,7 +124,7 @@
|
|
|
+ }
|
|
|
+
|
|
|
+ // Returns the boundary value for comparing against the FieldTrial's added
|
|
|
+-// groups for a given |divisor| (total probability) and |entropy_value|.
|
|
|
++// groups for a given `divisor` (total probability) and `entropy_value`.
|
|
|
+ FieldTrial::Probability GetGroupBoundaryValue(
|
|
|
+ FieldTrial::Probability divisor,
|
|
|
+ double entropy_value) {
|
|
|
+@@ -138,7 +138,7 @@
|
|
|
+ const double kEpsilon = 1e-8;
|
|
|
+ const FieldTrial::Probability result =
|
|
|
+ static_cast<FieldTrial::Probability>(divisor * entropy_value + kEpsilon);
|
|
|
+- // Ensure that adding the epsilon still results in a value < |divisor|.
|
|
|
++ // Ensure that adding the epsilon still results in a value < `divisor`.
|
|
|
+ return std::min(result, divisor - 1);
|
|
|
+ }
|
|
|
+
|
|
|
+@@ -259,7 +259,7 @@
|
|
|
+ if (forced_) {
|
|
|
+ DCHECK(!group_name_.empty());
|
|
|
+ if (name == group_name_) {
|
|
|
+- // Note that while |group_| may be equal to |kDefaultGroupNumber| on the
|
|
|
++ // Note that while `group_` may be equal to `kDefaultGroupNumber` on the
|
|
|
+ // forced trial, it will not have the same value as the default group
|
|
|
+ // number returned from the non-forced |FactoryGetFieldTrial()| call,
|
|
|
+ // which takes care to ensure that this does not happen.
|
|
|
+@@ -326,7 +326,7 @@
|
|
|
+ void FieldTrial::EnableBenchmarking() {
|
|
|
+ // We don't need to see field trials created via CreateFieldTrial() for
|
|
|
+ // benchmarking, because such field trials have only a single group and are
|
|
|
+- // not affected by randomization that |enable_benchmarking_| would disable.
|
|
|
++ // not affected by randomization that `enable_benchmarking_` would disable.
|
|
|
+ DCHECK_EQ(0u, FieldTrialList::GetRandomizedFieldTrialCount());
|
|
|
+ enable_benchmarking_ = true;
|
|
|
+ }
|
|
|
+@@ -453,7 +453,7 @@
|
|
|
+ if (group_ != kNotFinalized)
|
|
|
+ return;
|
|
|
+ accumulated_group_probability_ = divisor_;
|
|
|
+- // Here it's OK to use |kDefaultGroupNumber| since we can't be forced and not
|
|
|
++ // Here it's OK to use `kDefaultGroupNumber` since we can't be forced and not
|
|
|
+ // finalized.
|
|
|
+ DCHECK(!forced_);
|
|
|
+ SetGroupChoice(default_group_name_, kDefaultGroupNumber);
|
|
|
+@@ -807,7 +807,7 @@
|
|
|
+ field_trial = new FieldTrial(name, kTotalProbability, group_name, 0,
|
|
|
+ is_low_anonymity, is_overridden);
|
|
|
+ // The group choice will be finalized in this method. So
|
|
|
+- // |is_randomized_trial| should be false.
|
|
|
++ // `is_randomized_trial` should be false.
|
|
|
+ FieldTrialList::Register(field_trial, /*is_randomized_trial=*/false);
|
|
|
+ // Force the trial, which will also finalize the group choice.
|
|
|
+ field_trial->SetForced();
|
|
|
+@@ -910,12 +910,12 @@
|
|
|
+ if (!field_trial->ref_)
|
|
|
+ return false;
|
|
|
+
|
|
|
++ size_t allocated_size = 0;
|
|
|
+ const FieldTrial::FieldTrialEntry* entry =
|
|
|
+ global_->field_trial_allocator_->GetAsObject<FieldTrial::FieldTrialEntry>(
|
|
|
+- field_trial->ref_);
|
|
|
++ field_trial->ref_, &allocated_size);
|
|
|
++ CHECK(entry);
|
|
|
+
|
|
|
+- size_t allocated_size =
|
|
|
+- global_->field_trial_allocator_->GetAllocSize(field_trial->ref_);
|
|
|
+ uint64_t actual_size =
|
|
|
+ sizeof(FieldTrial::FieldTrialEntry) + entry->pickle_size;
|
|
|
+ if (allocated_size < actual_size)
|
|
|
+diff --git a/base/metrics/persistent_histogram_allocator.cc b/base/metrics/persistent_histogram_allocator.cc
|
|
|
+index d6cf8c6..07a3ab0 100644
|
|
|
+--- a/base/metrics/persistent_histogram_allocator.cc
|
|
|
++++ b/base/metrics/persistent_histogram_allocator.cc
|
|
|
+@@ -89,13 +89,13 @@
|
|
|
+ }
|
|
|
+
|
|
|
+ // Calculate the number of bytes required to store all of a histogram's
|
|
|
+-// "counts". This will return zero (0) if |bucket_count| is not valid.
|
|
|
++// "counts". This will return zero (0) if `bucket_count` is not valid.
|
|
|
+ size_t CalculateRequiredCountsBytes(size_t bucket_count) {
|
|
|
+ // 2 because each "sample count" also requires a backup "logged count"
|
|
|
+ // used for calculating the delta during snapshot operations.
|
|
|
+ const size_t kBytesPerBucket = 2 * sizeof(HistogramBase::AtomicCount);
|
|
|
+
|
|
|
+- // If the |bucket_count| is such that it would overflow the return type,
|
|
|
++ // If the `bucket_count` is such that it would overflow the return type,
|
|
|
+ // perhaps as the result of a malicious actor, then return zero to
|
|
|
+ // indicate the problem to the caller.
|
|
|
+ if (bucket_count > std::numeric_limits<size_t>::max() / kBytesPerBucket)
|
|
|
+@@ -176,7 +176,7 @@
|
|
|
+ PersistentSparseHistogramDataManager::LoadRecords(
|
|
|
+ PersistentSampleMapRecords* sample_map_records,
|
|
|
+ std::optional<HistogramBase::Sample> until_value) {
|
|
|
+- // DataManager must be locked in order to access the |sample_records_|
|
|
|
++ // DataManager must be locked in order to access the `sample_records_`
|
|
|
+ // vectors.
|
|
|
+ base::AutoLock auto_lock(lock_);
|
|
|
+
|
|
|
+@@ -222,7 +222,7 @@
|
|
|
+ }
|
|
|
+
|
|
|
+ // Return all references found that have not yet been seen by
|
|
|
+- // |sample_map_records|, up until |until_value| (if applicable).
|
|
|
++ // `sample_map_records`, up until `until_value` (if applicable).
|
|
|
+ std::vector<PersistentMemoryAllocator::Reference> new_references;
|
|
|
+ CHECK_GE(found_records.size(), sample_map_records->seen_);
|
|
|
+ auto new_found_records = base::make_span(found_records)
|
|
|
+@@ -230,9 +230,9 @@
|
|
|
+ new_references.reserve(new_found_records.size());
|
|
|
+ for (const auto& new_record : new_found_records) {
|
|
|
+ new_references.push_back(new_record.reference);
|
|
|
+- // Maybe references after |until_value| were found. Stop here immediately in
|
|
|
++ // Maybe references after `until_value` were found. Stop here immediately in
|
|
|
+ // such a case, since the caller will not expect any more samples after
|
|
|
+- // |until_value|.
|
|
|
++ // `until_value`.
|
|
|
+ if (until_value.has_value() && new_record.value == until_value.value()) {
|
|
|
+ break;
|
|
|
+ }
|
|
|
+@@ -321,9 +321,9 @@
|
|
|
+ // count data (while these must reference the persistent counts) and always
|
|
|
+ // add it to the local list of known histograms (while these may be simple
|
|
|
+ // references to histograms in other processes).
|
|
|
++ size_t length = 0;
|
|
|
+ PersistentHistogramData* data =
|
|
|
+- memory_allocator_->GetAsObject<PersistentHistogramData>(ref);
|
|
|
+- const size_t length = memory_allocator_->GetAllocSize(ref);
|
|
|
++ memory_allocator_->GetAsObject<PersistentHistogramData>(ref, &length);
|
|
|
+
|
|
|
+ // Check that metadata is reasonable: name is null-terminated and non-empty,
|
|
|
+ // ID fields have been loaded with a hash of the name (0 is considered
|
|
|
+@@ -331,7 +331,7 @@
|
|
|
+ if (!data || data->name[0] == '\0' ||
|
|
|
+ reinterpret_cast<char*>(data)[length - 1] != '\0' ||
|
|
|
+ data->samples_metadata.id == 0 || data->logged_metadata.id == 0 ||
|
|
|
+- // Note: Sparse histograms use |id + 1| in |logged_metadata|.
|
|
|
++ // Note: Sparse histograms use `id + 1` in `logged_metadata`.
|
|
|
+ (data->logged_metadata.id != data->samples_metadata.id &&
|
|
|
+ data->logged_metadata.id != data->samples_metadata.id + 1) ||
|
|
|
+ // Most non-matching values happen due to truncated names. Ideally, we
|
|
|
+@@ -374,7 +374,7 @@
|
|
|
+ histogram_data->histogram_type = histogram_type;
|
|
|
+ histogram_data->flags = flags | HistogramBase::kIsPersistent;
|
|
|
+
|
|
|
+- // |counts_ref| relies on being zero'd out initially. Even though this
|
|
|
++ // `counts_ref` relies on being zero'd out initially. Even though this
|
|
|
+ // should always be the case, manually zero it out again here in case there
|
|
|
+ // was memory corruption (e.g. if the memory was mapped from a corrupted
|
|
|
+ // spare file).
|
|
|
+@@ -388,7 +388,7 @@
|
|
|
+ size_t bucket_count = bucket_ranges->bucket_count();
|
|
|
+ size_t counts_bytes = CalculateRequiredCountsBytes(bucket_count);
|
|
|
+ if (counts_bytes == 0) {
|
|
|
+- // |bucket_count| was out-of-range.
|
|
|
++ // `bucket_count` was out-of-range.
|
|
|
+ return nullptr;
|
|
|
+ }
|
|
|
+
|
|
|
+@@ -396,8 +396,8 @@
|
|
|
+ // objects for re-use, it would be dangerous for one to hold a reference
|
|
|
+ // from a persistent allocator that is not the global one (which is
|
|
|
+ // permanent once set). If this stops being the case, this check can
|
|
|
+- // become an "if" condition beside "!ranges_ref" below and before
|
|
|
+- // set_persistent_reference() farther down.
|
|
|
++ // become an `if` condition beside `!ranges_ref` below and before
|
|
|
++ // `set_persistent_reference()` farther down.
|
|
|
+ DCHECK_EQ(this, GlobalHistogramAllocator::Get());
|
|
|
+
|
|
|
+ // Re-use an existing BucketRanges persistent allocation if one is known;
|
|
|
+@@ -434,7 +434,7 @@
|
|
|
+ if (ranges_ref && histogram_data) {
|
|
|
+ histogram_data->minimum = minimum;
|
|
|
+ histogram_data->maximum = maximum;
|
|
|
+- // |bucket_count| must fit within 32-bits or the allocation of the counts
|
|
|
++ // `bucket_count` must fit within 32-bits or the allocation of the counts
|
|
|
+ // array would have failed for being too large; the allocator supports
|
|
|
+ // less than 4GB total size.
|
|
|
+ histogram_data->bucket_count = static_cast<uint32_t>(bucket_count);
|
|
|
+@@ -447,7 +447,7 @@
|
|
|
+
|
|
|
+ if (histogram_data) {
|
|
|
+ // Create the histogram using resources in persistent memory. This ends up
|
|
|
+- // resolving the "ref" values stored in histogram_data instad of just
|
|
|
++ // resolving the `ref` values stored in histogram_data instead of just
|
|
|
+ // using what is already known above but avoids duplicating the switch
|
|
|
+ // statement here and serves as a double-check that everything is
|
|
|
+ // correct before commiting the new histogram to persistent space.
|
|
|
+@@ -588,17 +588,16 @@
|
|
|
+ uint32_t histogram_ranges_ref = histogram_data_ptr->ranges_ref;
|
|
|
+ uint32_t histogram_ranges_checksum = histogram_data_ptr->ranges_checksum;
|
|
|
+
|
|
|
++ size_t allocated_bytes = 0;
|
|
|
+ HistogramBase::Sample* ranges_data =
|
|
|
+ memory_allocator_->GetAsArray<HistogramBase::Sample>(
|
|
|
+ histogram_ranges_ref, kTypeIdRangesArray,
|
|
|
+- PersistentMemoryAllocator::kSizeAny);
|
|
|
++ PersistentMemoryAllocator::kSizeAny, &allocated_bytes);
|
|
|
+
|
|
|
+ const uint32_t max_buckets =
|
|
|
+ std::numeric_limits<uint32_t>::max() / sizeof(HistogramBase::Sample);
|
|
|
+ size_t required_bytes =
|
|
|
+ (histogram_bucket_count + 1) * sizeof(HistogramBase::Sample);
|
|
|
+- size_t allocated_bytes =
|
|
|
+- memory_allocator_->GetAllocSize(histogram_ranges_ref);
|
|
|
+ if (!ranges_data || histogram_bucket_count < 2 ||
|
|
|
+ histogram_bucket_count >= max_buckets ||
|
|
|
+ allocated_bytes < required_bytes) {
|
|
|
+@@ -626,11 +625,14 @@
|
|
|
+ }
|
|
|
+
|
|
|
+ size_t counts_bytes = CalculateRequiredCountsBytes(histogram_bucket_count);
|
|
|
++ if (counts_bytes == 0) {
|
|
|
++ return nullptr;
|
|
|
++ }
|
|
|
++
|
|
|
+ PersistentMemoryAllocator::Reference counts_ref =
|
|
|
+ histogram_data_ptr->counts_ref.load(std::memory_order_acquire);
|
|
|
+- if (counts_bytes == 0 ||
|
|
|
+- (counts_ref != 0 &&
|
|
|
+- memory_allocator_->GetAllocSize(counts_ref) < counts_bytes)) {
|
|
|
++ if (counts_ref != 0 && !memory_allocator_->GetAsArray<uint8_t>(
|
|
|
++ counts_ref, kTypeIdCountsArray, counts_bytes)) {
|
|
|
+ return nullptr;
|
|
|
+ }
|
|
|
+
|
|
|
+@@ -958,7 +960,7 @@
|
|
|
+ // histogram allocator was initialized.
|
|
|
+ //
|
|
|
+ // TODO(crbug.com/40945497): CHECK(histogram_count == 0) and remove emit of
|
|
|
+- // early histogram count once |histogram_count| is reliably zero (0) for all
|
|
|
++ // early histogram count once `histogram_count` is reliably zero (0) for all
|
|
|
+ // process types.
|
|
|
+ size_t histogram_count = StatisticsRecorder::GetHistogramCount();
|
|
|
+ if (histogram_count != 0) {
|
|
|
+diff --git a/base/metrics/persistent_histogram_allocator.h b/base/metrics/persistent_histogram_allocator.h
|
|
|
+index da6756c..e9e7f893 100644
|
|
|
+--- a/base/metrics/persistent_histogram_allocator.h
|
|
|
++++ b/base/metrics/persistent_histogram_allocator.h
|
|
|
+@@ -48,8 +48,8 @@
|
|
|
+ ~PersistentSparseHistogramDataManager();
|
|
|
+
|
|
|
+ // Returns an object that manages persistent-sample-map records for a given
|
|
|
+- // |id|. The returned object queries |this| for records. Hence, the returned
|
|
|
+- // object must not outlive |this|.
|
|
|
++ // `id`. The returned object queries `this` for records. Hence, the returned
|
|
|
++ // object must not outlive `this`.
|
|
|
+ std::unique_ptr<PersistentSampleMapRecords> CreateSampleMapRecords(
|
|
|
+ uint64_t id);
|
|
|
+
|
|
|
+@@ -72,19 +72,19 @@
|
|
|
+ std::vector<ReferenceAndSample>* GetSampleMapRecordsWhileLocked(uint64_t id)
|
|
|
+ EXCLUSIVE_LOCKS_REQUIRED(lock_);
|
|
|
+
|
|
|
+- // Returns sample-map records belonging to the specified |sample_map_records|.
|
|
|
+- // Only records found that were not yet seen by |sample_map_records| will be
|
|
|
+- // returned, determined by its |seen_| field. Records found for other
|
|
|
++ // Returns sample-map records belonging to the specified `sample_map_records`.
|
|
|
++ // Only records found that were not yet seen by `sample_map_records` will be
|
|
|
++ // returned, determined by its `seen_` field. Records found for other
|
|
|
+ // sample-maps are held for later use without having to iterate again. This
|
|
|
+ // should be called only from a PersistentSampleMapRecords object because
|
|
|
+ // those objects have a contract that there are no other threads accessing the
|
|
|
+- // internal records_ field of the object that is passed in. If |until_value|
|
|
|
++ // internal records_ field of the object that is passed in. If `until_value`
|
|
|
+ // is set and a sample is found with said value, the search will stop early
|
|
|
+ // and the last entry in the returned vector will be that sample.
|
|
|
+ // Note: The returned vector is not guaranteed to contain all unseen records
|
|
|
+- // for |sample_map_records|. If this is needed, then repeatedly call this
|
|
|
++ // for `sample_map_records`. If this is needed, then repeatedly call this
|
|
|
+ // until an empty vector is returned, which definitely means that
|
|
|
+- // |sample_map_records| has seen all its records.
|
|
|
++ // `sample_map_records` has seen all its records.
|
|
|
+ std::vector<PersistentMemoryAllocator::Reference> LoadRecords(
|
|
|
+ PersistentSampleMapRecords* sample_map_records,
|
|
|
+ std::optional<HistogramBase::Sample> until_value);
|
|
|
+@@ -113,7 +113,7 @@
|
|
|
+ // Constructs an instance of this class. The manager object must live longer
|
|
|
+ // than all instances of this class that reference it, which is not usually
|
|
|
+ // a problem since these objects are generally managed from within that
|
|
|
+- // manager instance. The same caveats apply for for the |records| vector.
|
|
|
++  // manager instance. The same caveats apply for the `records` vector.
|
|
|
+ PersistentSampleMapRecords(
|
|
|
+ PersistentSparseHistogramDataManager* data_manager,
|
|
|
+ uint64_t sample_map_id,
|
|
|
+@@ -126,18 +126,18 @@
|
|
|
+
|
|
|
+ ~PersistentSampleMapRecords();
|
|
|
+
|
|
|
+- // Gets next references to persistent sample-map records. If |until_value| is
|
|
|
++ // Gets next references to persistent sample-map records. If `until_value` is
|
|
|
+ // passed, and said value is found, then it will be the last element in the
|
|
|
+ // returned vector. The type and layout of the data being referenced is
|
|
|
+ // defined entirely within the PersistentSampleMap class.
|
|
|
+ // Note: The returned vector is not guaranteed to contain all unseen records
|
|
|
+- // for |this|. If this is needed, then repeatedly call this until an empty
|
|
|
+- // vector is returned, which definitely means that |this| has seen all its
|
|
|
++ // for `this`. If this is needed, then repeatedly call this until an empty
|
|
|
++ // vector is returned, which definitely means that `this` has seen all its
|
|
|
+ // records.
|
|
|
+ std::vector<PersistentMemoryAllocator::Reference> GetNextRecords(
|
|
|
+ std::optional<HistogramBase::Sample> until_value);
|
|
|
+
|
|
|
+- // Creates a new persistent sample-map record for sample |value| and returns
|
|
|
++ // Creates a new persistent sample-map record for sample `value` and returns
|
|
|
+ // a reference to it.
|
|
|
+ PersistentMemoryAllocator::Reference CreateNew(HistogramBase::Sample value);
|
|
|
+
|
|
|
+@@ -161,7 +161,7 @@
|
|
|
+ // ID of PersistentSampleMap to which these records apply.
|
|
|
+ const uint64_t sample_map_id_;
|
|
|
+
|
|
|
+- // This is the count of how many "records" have already been read by |this|.
|
|
|
++ // This is the count of how many "records" have already been read by `this`.
|
|
|
+ size_t seen_ = 0;
|
|
|
+
|
|
|
+ // This is the set of records found during iteration through memory, owned by
|
|
|
+@@ -186,7 +186,7 @@
|
|
|
+ // See PersistentMemoryAllocator::Iterator for more information.
|
|
|
+ class BASE_EXPORT Iterator {
|
|
|
+ public:
|
|
|
+- // Constructs an iterator on a given |allocator|, starting at the beginning.
|
|
|
++ // Constructs an iterator on a given `allocator`, starting at the beginning.
|
|
|
+ // The allocator must live beyond the lifetime of the iterator.
|
|
|
+ explicit Iterator(PersistentHistogramAllocator* allocator);
|
|
|
+
|
|
|
+@@ -199,7 +199,7 @@
|
|
|
+ std::unique_ptr<HistogramBase> GetNext() { return GetNextWithIgnore(0); }
|
|
|
+
|
|
|
+ // Gets the next histogram from persistent memory, ignoring one particular
|
|
|
+- // reference in the process. Pass |ignore| of zero (0) to ignore nothing.
|
|
|
++ // reference in the process. Pass `ignore` of zero (0) to ignore nothing.
|
|
|
+ std::unique_ptr<HistogramBase> GetNextWithIgnore(Reference ignore);
|
|
|
+
|
|
|
+ private:
|
|
|
+@@ -240,7 +240,7 @@
|
|
|
+
|
|
|
+ // Recreate a Histogram from data held in persistent memory. Though this
|
|
|
+ // object will be local to the current process, the sample data will be
|
|
|
+- // shared with all other threads referencing it. This method takes a |ref|
|
|
|
++ // shared with all other threads referencing it. This method takes a `ref`
|
|
|
+ // to where the top-level histogram data may be found in this allocator.
|
|
|
+ // This method will return null if any problem is detected with the data.
|
|
|
+ std::unique_ptr<HistogramBase> GetHistogram(Reference ref);
|
|
|
+@@ -257,7 +257,7 @@
|
|
|
+ Reference* ref_ptr);
|
|
|
+
|
|
|
+ // Finalize the creation of the histogram, making it available to other
|
|
|
+- // processes if |registered| (as in: added to the StatisticsRecorder) is
|
|
|
++ // processes if `registered` (as in: added to the StatisticsRecorder) is
|
|
|
+ // True, forgetting it otherwise.
|
|
|
+ void FinalizeHistogram(Reference ref, bool registered);
|
|
|
+
|
|
|
+@@ -280,35 +280,35 @@
|
|
|
+ const HistogramBase* histogram);
|
|
|
+
|
|
|
+ // Returns an object that manages persistent-sample-map records for a given
|
|
|
+- // |id|. The returned object queries |sparse_histogram_data_manager_| for
|
|
|
++ // `id`. The returned object queries `sparse_histogram_data_manager_` for
|
|
|
+ // records. Hence, the returned object must not outlive
|
|
|
+- // |sparse_histogram_data_manager_| (and hence |this|).
|
|
|
++ // `sparse_histogram_data_manager_` (and hence `this`).
|
|
|
+ std::unique_ptr<PersistentSampleMapRecords> CreateSampleMapRecords(
|
|
|
+ uint64_t id);
|
|
|
+
|
|
|
+ // Creates internal histograms for tracking memory use and allocation sizes
|
|
|
+- // for allocator of |name| (which can simply be the result of Name()). This
|
|
|
+- // is done seperately from construction for situations such as when the
|
|
|
++ // for allocator of `name` (which can simply be the result of Name()). This
|
|
|
++ // is done separately from construction for situations such as when the
|
|
|
+ // histograms will be backed by memory provided by this very allocator.
|
|
|
+ //
|
|
|
+ // IMPORTANT: tools/metrics/histograms/metadata/uma/histograms.xml must
|
|
|
+- // be updated with the following histograms for each |name| param:
|
|
|
++ // be updated with the following histograms for each `name` param:
|
|
|
+ // UMA.PersistentAllocator.name.UsedPct
|
|
|
+ void CreateTrackingHistograms(std::string_view name);
|
|
|
+ void UpdateTrackingHistograms();
|
|
|
+
|
|
|
+- // Sets the internal |ranges_manager_|, which will be used by the allocator to
|
|
|
+- // register BucketRanges. Takes ownership of the passed |ranges_manager|.
|
|
|
++ // Sets the internal `ranges_manager_`, which will be used by the allocator to
|
|
|
++ // register BucketRanges. Takes ownership of the passed `ranges_manager`.
|
|
|
+ //
|
|
|
+- // WARNING: Since histograms may be created from |this| from multiple threads,
|
|
|
++ // WARNING: Since histograms may be created from `this` from multiple threads,
|
|
|
+ // for example through a direct call to CreateHistogram(), or while iterating
|
|
|
+- // through |this|, then the passed manager may also be accessed concurrently.
|
|
|
++ // through `this`, then the passed manager may also be accessed concurrently.
|
|
|
+ // Hence, care must be taken to ensure that either:
|
|
|
+ // 1) The passed manager is threadsafe (see ThreadSafeRangesManager), or
|
|
|
+- // 2) |this| is not used concurrently.
|
|
|
++ // 2) `this` is not used concurrently.
|
|
|
+ void SetRangesManager(RangesManager* ranges_manager);
|
|
|
+
|
|
|
+- // Clears the internal |last_created_| reference so testing can validate
|
|
|
++ // Clears the internal `last_created_` reference so testing can validate
|
|
|
+ // operation without that optimization.
|
|
|
+ void ClearLastCreatedReferenceForTesting();
|
|
|
+
|
|
|
+@@ -334,7 +334,7 @@
|
|
|
+ PersistentHistogramData* histogram_data_ptr);
|
|
|
+
|
|
|
+ // Gets or creates an object in the global StatisticsRecorder matching
|
|
|
+- // the |histogram| passed. Null is returned if one was not found and
|
|
|
++ // the `histogram` passed. Null is returned if one was not found and
|
|
|
+ // one could not be created.
|
|
|
+ HistogramBase* GetOrCreateStatisticsRecorderHistogram(
|
|
|
+ const HistogramBase* histogram);
|
|
|
+@@ -370,7 +370,7 @@
|
|
|
+
|
|
|
+ ~GlobalHistogramAllocator() override;
|
|
|
+
|
|
|
+- // Create a global allocator using the passed-in memory |base|, |size|, and
|
|
|
++ // Create a global allocator using the passed-in memory `base`, `size`, and
|
|
|
+ // other parameters. Ownership of the memory segment remains with the caller.
|
|
|
+ static void CreateWithPersistentMemory(void* base,
|
|
|
+ size_t size,
|
|
|
+@@ -379,17 +379,17 @@
|
|
|
+ std::string_view name);
|
|
|
+
|
|
|
+ // Create a global allocator using an internal block of memory of the
|
|
|
+- // specified |size| taken from the heap.
|
|
|
++ // specified `size` taken from the heap.
|
|
|
+ static void CreateWithLocalMemory(size_t size,
|
|
|
+ uint64_t id,
|
|
|
+ std::string_view name);
|
|
|
+
|
|
|
+ #if !BUILDFLAG(IS_NACL)
|
|
|
+- // Create a global allocator by memory-mapping a |file|. If the file does
|
|
|
+- // not exist, it will be created with the specified |size|. If the file does
|
|
|
++ // Create a global allocator by memory-mapping a `file`. If the file does
|
|
|
++ // not exist, it will be created with the specified `size`. If the file does
|
|
|
+ // exist, the allocator will use and add to its contents, ignoring the passed
|
|
|
+ // size in favor of the existing size. Returns whether the global allocator
|
|
|
+- // was set. If |exclusive_write| is true, the file will be opened in a mode
|
|
|
++ // was set. If `exclusive_write` is true, the file will be opened in a mode
|
|
|
+ // that disallows multiple concurrent writers (no effect on non-Windows).
|
|
|
+ static bool CreateWithFile(const FilePath& file_path,
|
|
|
+ size_t size,
|
|
|
+@@ -397,9 +397,9 @@
|
|
|
+ std::string_view name,
|
|
|
+ bool exclusive_write = false);
|
|
|
+
|
|
|
+- // Creates a new file at |active_path|. If it already exists, it will first be
|
|
|
+- // moved to |base_path|. In all cases, any old file at |base_path| will be
|
|
|
+- // removed. If |spare_path| is non-empty and exists, that will be renamed and
|
|
|
++ // Creates a new file at `active_path`. If it already exists, it will first be
|
|
|
++ // moved to `base_path`. In all cases, any old file at `base_path` will be
|
|
|
++ // removed. If `spare_path` is non-empty and exists, that will be renamed and
|
|
|
+ // used as the active file. Otherwise, the file will be created using the
|
|
|
+ // given size, id, and name. Returns whether the global allocator was set.
|
|
|
+ static bool CreateWithActiveFile(const FilePath& base_path,
|
|
|
+@@ -410,9 +410,9 @@
|
|
|
+ std::string_view name);
|
|
|
+
|
|
|
+ // Uses ConstructBaseActivePairFilePaths() to build a pair of file names which
|
|
|
+- // are then used for CreateWithActiveFile(). |name| is used for both the
|
|
|
++ // are then used for CreateWithActiveFile(). `name` is used for both the
|
|
|
+ // internal name for the allocator and also for the name of the file inside
|
|
|
+- // |dir|.
|
|
|
++ // `dir`.
|
|
|
+ static bool CreateWithActiveFileInDir(const FilePath& dir,
|
|
|
+ size_t size,
|
|
|
+ uint64_t id,
|
|
|
+@@ -447,7 +447,7 @@
|
|
|
+ #endif
|
|
|
+
|
|
|
+ // Create a global allocator using a block of shared memory accessed
|
|
|
+- // through the given |region|. The allocator maps the shared memory into
|
|
|
++ // through the given `region`. The allocator maps the shared memory into
|
|
|
+ // current process's virtual address space and frees it upon destruction.
|
|
|
+ // The memory will continue to live if other processes have access to it.
|
|
|
+ static void CreateWithSharedMemoryRegion(
|
|
|
+@@ -486,7 +486,7 @@
|
|
|
+ bool HasPersistentLocation() const;
|
|
|
+
|
|
|
+ // Moves the file being used to persist this allocator's data to the directory
|
|
|
+- // specified by |dir|. Returns whether the operation was successful.
|
|
|
++ // specified by `dir`. Returns whether the operation was successful.
|
|
|
+ bool MovePersistentFile(const FilePath& dir);
|
|
|
+
|
|
|
+ // Writes the internal data to a previously set location. This is generally
|
|
|
+diff --git a/base/metrics/persistent_memory_allocator.cc b/base/metrics/persistent_memory_allocator.cc
|
|
|
+index 9e5f585..59473af 100644
|
|
|
+--- a/base/metrics/persistent_memory_allocator.cc
|
|
|
++++ b/base/metrics/persistent_memory_allocator.cc
|
|
|
+@@ -59,7 +59,7 @@
|
|
|
+ // the metadata, the version number can be queried to operate in a backward-
|
|
|
+ // compatible manner until the memory segment is completely re-initalized.
|
|
|
+ // Note: If you update the metadata in a non-backwards compatible way, reset
|
|
|
+-// |kCompatibleVersions|. Otherwise, add the previous version.
|
|
|
++// `kCompatibleVersions`. Otherwise, add the previous version.
|
|
|
+ constexpr uint32_t kGlobalVersion = 3;
|
|
|
+ static constexpr uint32_t kOldCompatibleVersions[] = {2};
|
|
|
+
|
|
|
+@@ -146,12 +146,12 @@
|
|
|
+
|
|
|
+ // The "iterable" queue is an M&S Queue as described here, append-only:
|
|
|
+ // https://www.research.ibm.com/people/m/michael/podc-1996.pdf
|
|
|
+- // |queue| needs to be 64-bit aligned and is itself a multiple of 64 bits.
|
|
|
++ // `queue` needs to be 64-bit aligned and is itself a multiple of 64 bits.
|
|
|
+ volatile std::atomic<uint32_t> tailptr; // Last block of iteration queue.
|
|
|
+ volatile BlockHeader queue; // Empty block for linked-list head/tail.
|
|
|
+ };
|
|
|
+
|
|
|
+-// The "queue" block header is used to detect "last node" so that zero/null
|
|
|
++// The `queue` block header is used to detect the "last node" so that zero/null
|
|
|
+ // can be used to indicate that it hasn't been added at all. It is part of
|
|
|
+ // the SharedMetadata structure which itself is always located at offset zero.
|
|
|
+ const PersistentMemoryAllocator::Reference
|
|
|
+@@ -207,7 +207,8 @@
|
|
|
+ }
|
|
|
+
|
|
|
+ PersistentMemoryAllocator::Reference
|
|
|
+-PersistentMemoryAllocator::Iterator::GetNext(uint32_t* type_return) {
|
|
|
++PersistentMemoryAllocator::Iterator::GetNext(uint32_t* type_return,
|
|
|
++ size_t* alloc_size) {
|
|
|
+ // Make a copy of the existing count of found-records, acquiring all changes
|
|
|
+ // made to the allocator, notably "freeptr" (see comment in loop for why
|
|
|
+ // the load of that value cannot be moved above here) that occurred during
|
|
|
+@@ -218,12 +219,13 @@
|
|
|
+ // "count" was fetched _after_ "freeptr" then it would be possible for
|
|
|
+ // this thread to be interrupted between them and other threads perform
|
|
|
+ // multiple allocations, make-iterables, and iterations (with the included
|
|
|
+- // increment of |record_count_|) culminating in the check at the bottom
|
|
|
++ // increment of `record_count_`) culminating in the check at the bottom
|
|
|
+ // mistakenly determining that a loop exists. Isn't this stuff fun?
|
|
|
+ uint32_t count = record_count_.load(std::memory_order_acquire);
|
|
|
+
|
|
|
+ Reference last = last_record_.load(std::memory_order_acquire);
|
|
|
+- Reference next;
|
|
|
++ Reference next = 0;
|
|
|
++ size_t next_size = 0;
|
|
|
+ while (true) {
|
|
|
+ const volatile BlockHeader* block =
|
|
|
+ allocator_->GetBlock(last, 0, 0, true, false);
|
|
|
+@@ -244,7 +246,7 @@
|
|
|
+ next = block->next.load(std::memory_order_acquire);
|
|
|
+ if (next == kReferenceQueue) // No next allocation in queue.
|
|
|
+ return kReferenceNull;
|
|
|
+- block = allocator_->GetBlock(next, 0, 0, false, false);
|
|
|
++ block = allocator_->GetBlock(next, 0, 0, false, false, &next_size);
|
|
|
+ if (!block) { // Memory is corrupt.
|
|
|
+ allocator_->SetCorrupt();
|
|
|
+ return kReferenceNull;
|
|
|
+@@ -285,21 +287,29 @@
|
|
|
+ // It does not matter if it falls behind temporarily so long as it never
|
|
|
+ // gets ahead.
|
|
|
+ record_count_.fetch_add(1, std::memory_order_release);
|
|
|
++ if (alloc_size) {
|
|
|
++ *alloc_size = next_size;
|
|
|
++ }
|
|
|
+ return next;
|
|
|
+ }
|
|
|
+
|
|
|
+ PersistentMemoryAllocator::Reference
|
|
|
+-PersistentMemoryAllocator::Iterator::GetNextOfType(uint32_t type_match) {
|
|
|
++PersistentMemoryAllocator::Iterator::GetNextOfType(uint32_t type_match,
|
|
|
++ size_t* alloc_size) {
|
|
|
+ Reference ref;
|
|
|
++ size_t size;
|
|
|
+ uint32_t type_found;
|
|
|
+- while ((ref = GetNext(&type_found)) != 0) {
|
|
|
+- if (type_found == type_match)
|
|
|
++ while ((ref = GetNext(&type_found, &size)) != 0) {
|
|
|
++ if (type_found == type_match) {
|
|
|
++ if (alloc_size) {
|
|
|
++ *alloc_size = size;
|
|
|
++ }
|
|
|
+ return ref;
|
|
|
++ }
|
|
|
+ }
|
|
|
+ return kReferenceNull;
|
|
|
+ }
|
|
|
+
|
|
|
+-
|
|
|
+ // static
|
|
|
+ bool PersistentMemoryAllocator::IsMemoryAcceptable(const void* base,
|
|
|
+ size_t size,
|
|
|
+@@ -474,12 +484,12 @@
|
|
|
+
|
|
|
+ const char* PersistentMemoryAllocator::Name() const {
|
|
|
+ Reference name_ref = shared_meta()->name;
|
|
|
+- const char* name_cstr =
|
|
|
+- GetAsArray<char>(name_ref, 0, PersistentMemoryAllocator::kSizeAny);
|
|
|
++ size_t name_length = 0;
|
|
|
++ const char* name_cstr = GetAsArray<char>(
|
|
|
++ name_ref, 0, PersistentMemoryAllocator::kSizeAny, &name_length);
|
|
|
+ if (!name_cstr)
|
|
|
+ return "";
|
|
|
+
|
|
|
+- size_t name_length = GetAllocSize(name_ref);
|
|
|
+ if (name_cstr[name_length - 1] != '\0') {
|
|
|
+ NOTREACHED();
|
|
|
+ }
|
|
|
+@@ -536,23 +546,6 @@
|
|
|
+ return ref;
|
|
|
+ }
|
|
|
+
|
|
|
+-size_t PersistentMemoryAllocator::GetAllocSize(Reference ref) const {
|
|
|
+- const volatile BlockHeader* const block = GetBlock(ref, 0, 0, false, false);
|
|
|
+- if (!block)
|
|
|
+- return 0;
|
|
|
+- uint32_t size = block->size;
|
|
|
+- // Header was verified by GetBlock() but a malicious actor could change
|
|
|
+- // the value between there and here. Check it again.
|
|
|
+- uint32_t total_size;
|
|
|
+- if (size <= sizeof(BlockHeader) ||
|
|
|
+- !base::CheckAdd(ref, size).AssignIfValid(&total_size) ||
|
|
|
+- total_size > mem_size_) {
|
|
|
+- SetCorrupt();
|
|
|
+- return 0;
|
|
|
+- }
|
|
|
+- return size - sizeof(BlockHeader);
|
|
|
+-}
|
|
|
+-
|
|
|
+ uint32_t PersistentMemoryAllocator::GetType(Reference ref) const {
|
|
|
+ const volatile BlockHeader* const block = GetBlock(ref, 0, 0, false, false);
|
|
|
+ if (!block)
|
|
|
+@@ -622,13 +615,15 @@
|
|
|
+
|
|
|
+ PersistentMemoryAllocator::Reference PersistentMemoryAllocator::Allocate(
|
|
|
+ size_t req_size,
|
|
|
+- uint32_t type_id) {
|
|
|
+- return AllocateImpl(req_size, type_id);
|
|
|
++ uint32_t type_id,
|
|
|
++ size_t* alloc_size) {
|
|
|
++ return AllocateImpl(req_size, type_id, alloc_size);
|
|
|
+ }
|
|
|
+
|
|
|
+ PersistentMemoryAllocator::Reference PersistentMemoryAllocator::AllocateImpl(
|
|
|
+ size_t req_size,
|
|
|
+- uint32_t type_id) {
|
|
|
++ uint32_t type_id,
|
|
|
++ size_t* alloc_size) {
|
|
|
+ DCHECK_NE(access_mode_, kReadOnly);
|
|
|
+
|
|
|
+ // Validate req_size to ensure it won't overflow when used as 32-bit value.
|
|
|
+@@ -790,6 +785,11 @@
|
|
|
+ block->size = static_cast<uint32_t>(size);
|
|
|
+ block->cookie = kBlockCookieAllocated;
|
|
|
+ block->type_id.store(type_id, std::memory_order_relaxed);
|
|
|
++
|
|
|
++ // Return the allocation size if requested.
|
|
|
++ if (alloc_size) {
|
|
|
++ *alloc_size = size - sizeof(BlockHeader);
|
|
|
++ }
|
|
|
+ return freeptr;
|
|
|
+ }
|
|
|
+ }
|
|
|
+@@ -901,17 +901,16 @@
|
|
|
+ return CheckFlag(&shared_meta()->flags, kFlagFull);
|
|
|
+ }
|
|
|
+
|
|
|
+-// Dereference a block |ref| and ensure that it's valid for the desired
|
|
|
+-// |type_id| and |size|. |special| indicates that we may try to access block
|
|
|
+-// headers not available to callers but still accessed by this module. By
|
|
|
+-// having internal dereferences go through this same function, the allocator
|
|
|
+-// is hardened against corruption.
|
|
|
+ const volatile PersistentMemoryAllocator::BlockHeader*
|
|
|
+ PersistentMemoryAllocator::GetBlock(Reference ref,
|
|
|
+ uint32_t type_id,
|
|
|
+ size_t size,
|
|
|
+ bool queue_ok,
|
|
|
+- bool free_ok) const {
|
|
|
++ bool free_ok,
|
|
|
++ size_t* alloc_size) const {
|
|
|
++  // The caller cannot request `alloc_size` if `queue_ok` or `free_ok` is true.
|
|
|
++ CHECK(!(alloc_size && (queue_ok || free_ok)));
|
|
|
++
|
|
|
+ // Handle special cases.
|
|
|
+ if (ref == kReferenceQueue && queue_ok)
|
|
|
+ return reinterpret_cast<const volatile BlockHeader*>(mem_base_ + ref);
|
|
|
+@@ -930,29 +929,39 @@
|
|
|
+ return nullptr;
|
|
|
+ }
|
|
|
+
|
|
|
++ const volatile BlockHeader* const block =
|
|
|
++ reinterpret_cast<volatile BlockHeader*>(mem_base_ + ref);
|
|
|
++
|
|
|
+ // Validation of referenced block-header.
|
|
|
+ if (!free_ok) {
|
|
|
+- const volatile BlockHeader* const block =
|
|
|
+- reinterpret_cast<volatile BlockHeader*>(mem_base_ + ref);
|
|
|
+ if (block->cookie != kBlockCookieAllocated)
|
|
|
+ return nullptr;
|
|
|
+- if (block->size < size)
|
|
|
+- return nullptr;
|
|
|
+- uint32_t block_size;
|
|
|
+- if (!base::CheckAdd(ref, block->size).AssignIfValid(&block_size)) {
|
|
|
++ const uint32_t block_size = block->size;
|
|
|
++ if (block_size < size) {
|
|
|
+ return nullptr;
|
|
|
+ }
|
|
|
+- if (block_size > mem_size_) {
|
|
|
++    // Find and validate the end of the block.
|
|
|
++ uint32_t block_end_ref;
|
|
|
++ if (!base::CheckAdd(ref, block_size).AssignIfValid(&block_end_ref)) {
|
|
|
++ return nullptr;
|
|
|
++ }
|
|
|
++ if (block_end_ref > mem_size_) {
|
|
|
++ // The end of the alloc extends beyond the allocator's bounds.
|
|
|
++ SetCorrupt();
|
|
|
+ return nullptr;
|
|
|
+ }
|
|
|
+ if (type_id != 0 &&
|
|
|
+ block->type_id.load(std::memory_order_relaxed) != type_id) {
|
|
|
+ return nullptr;
|
|
|
+ }
|
|
|
++ // Return `alloc_size` if requested by the caller.
|
|
|
++ if (alloc_size) {
|
|
|
++ *alloc_size = block_size - sizeof(BlockHeader);
|
|
|
++ }
|
|
|
+ }
|
|
|
+
|
|
|
+ // Return pointer to block data.
|
|
|
+- return reinterpret_cast<const volatile BlockHeader*>(mem_base_ + ref);
|
|
|
++ return block;
|
|
|
+ }
|
|
|
+
|
|
|
+ void PersistentMemoryAllocator::FlushPartial(size_t length, bool sync) {
|
|
|
+@@ -973,10 +982,11 @@
|
|
|
+ const volatile void* PersistentMemoryAllocator::GetBlockData(
|
|
|
+ Reference ref,
|
|
|
+ uint32_t type_id,
|
|
|
+- size_t size) const {
|
|
|
++ size_t size,
|
|
|
++ size_t* alloc_size) const {
|
|
|
+ DCHECK(size > 0);
|
|
|
+ const volatile BlockHeader* block =
|
|
|
+- GetBlock(ref, type_id, size, false, false);
|
|
|
++ GetBlock(ref, type_id, size, false, false, alloc_size);
|
|
|
+ if (!block)
|
|
|
+ return nullptr;
|
|
|
+ return reinterpret_cast<const volatile char*>(block) + sizeof(BlockHeader);
|
|
|
+@@ -1155,14 +1165,14 @@
|
|
|
+ base::BlockingType::MAY_BLOCK);
|
|
|
+
|
|
|
+ // Calculate begin/end addresses so that the first byte of every page
|
|
|
+- // in that range can be read. Keep within the used space. The |volatile|
|
|
|
++ // in that range can be read. Keep within the used space. The `volatile`
|
|
|
+ // keyword makes it so the compiler can't make assumptions about what is
|
|
|
+ // in a given memory location and thus possibly avoid the read.
|
|
|
+ const volatile char* mem_end = mem_base_ + used();
|
|
|
+ const volatile char* mem_begin = mem_base_;
|
|
|
+
|
|
|
+ // Iterate over the memory a page at a time, reading the first byte of
|
|
|
+- // every page. The values are added to a |total| so that the compiler
|
|
|
++ // every page. The values are added to a `total` so that the compiler
|
|
|
+ // can't omit the read.
|
|
|
+ int total = 0;
|
|
|
+ for (const volatile char* memory = mem_begin; memory < mem_end;
|
|
|
+@@ -1170,7 +1180,7 @@
|
|
|
+ total += *memory;
|
|
|
+ }
|
|
|
+
|
|
|
+- // Tell the compiler that |total| is used so that it can't optimize away
|
|
|
++ // Tell the compiler that `total` is used so that it can't optimize away
|
|
|
+ // the memory accesses above.
|
|
|
+ debug::Alias(&total);
|
|
|
+ }
|
|
|
+@@ -1240,7 +1250,8 @@
|
|
|
+ #endif // !BUILDFLAG(IS_NACL)
|
|
|
+
|
|
|
+ if (!ref) {
|
|
|
+- ref = allocator_->Allocate(size_, type_);
|
|
|
++ [[maybe_unused]] size_t alloc_size = 0;
|
|
|
++ ref = allocator_->Allocate(size_, type_, &alloc_size);
|
|
|
+ if (!ref) {
|
|
|
+ return span<uint8_t>();
|
|
|
+ }
|
|
|
+@@ -1256,7 +1267,7 @@
|
|
|
+ // allocation, and stored its reference. Purge the allocation that was
|
|
|
+ // just done and use the other one instead.
|
|
|
+ DCHECK_EQ(type_, allocator_->GetType(existing));
|
|
|
+- DCHECK_LE(size_, allocator_->GetAllocSize(existing));
|
|
|
++ DCHECK_LE(size_, alloc_size);
|
|
|
+ allocator_->ChangeType(ref, 0, type_, /*clear=*/false);
|
|
|
+ ref = existing;
|
|
|
+ #if !BUILDFLAG(IS_NACL)
|
|
|
+@@ -1292,13 +1303,13 @@
|
|
|
+ SCOPED_CRASH_KEY_NUMBER("PersistentMemoryAllocator", "size_", size_);
|
|
|
+ if (ref == 0xC8799269) {
|
|
|
+ // There are many crash reports containing the corrupted "0xC8799269"
|
|
|
+- // value in |ref|. This value is actually a "magic" number to indicate
|
|
|
++ // value in `ref`. This value is actually a "magic" number to indicate
|
|
|
+ // that a certain block in persistent memory was successfully allocated,
|
|
|
+ // so it should not appear there. Include some extra crash keys to see if
|
|
|
+ // the surrounding values were also corrupted. If so, the value before
|
|
|
+ // would be the size of the allocated object, and the value after would be
|
|
|
+ // the type id of the allocated object. If they are not corrupted, these
|
|
|
+- // would contain |ranges_checksum| and the start of |samples_metadata|
|
|
|
++ // would contain `ranges_checksum` and the start of `samples_metadata`
|
|
|
+ // respectively (see PersistentHistogramData struct). We do some pointer
|
|
|
+ // arithmetic here -- it should theoretically be safe, unless something
|
|
|
+ // went terribly wrong...
|
|
|
+diff --git a/base/metrics/persistent_memory_allocator.h b/base/metrics/persistent_memory_allocator.h
|
|
|
+index 3ab70be..4cf07a1 100644
|
|
|
+--- a/base/metrics/persistent_memory_allocator.h
|
|
|
++++ b/base/metrics/persistent_memory_allocator.h
|
|
|
+@@ -171,13 +171,13 @@
|
|
|
+ // eventually quit.
|
|
|
+ class BASE_EXPORT Iterator {
|
|
|
+ public:
|
|
|
+- // Constructs an iterator on a given |allocator|, starting at the beginning.
|
|
|
++ // Constructs an iterator on a given `allocator`, starting at the beginning.
|
|
|
+ // The allocator must live beyond the lifetime of the iterator. This class
|
|
|
+ // has read-only access to the allocator (hence "const") but the returned
|
|
|
+ // references can be used on a read/write version, too.
|
|
|
+ explicit Iterator(const PersistentMemoryAllocator* allocator);
|
|
|
+
|
|
|
+- // As above but resuming from the |starting_after| reference. The first call
|
|
|
++ // As above but resuming from the `starting_after` reference. The first call
|
|
|
+ // to GetNext() will return the next object found after that reference. The
|
|
|
+ // reference must be to an "iterable" object; references to non-iterable
|
|
|
+ // objects (those that never had MakeIterable() called for them) will cause
|
|
|
+@@ -193,7 +193,7 @@
|
|
|
+ // Resets the iterator back to the beginning.
|
|
|
+ void Reset();
|
|
|
+
|
|
|
+- // Resets the iterator, resuming from the |starting_after| reference.
|
|
|
++ // Resets the iterator, resuming from the `starting_after` reference.
|
|
|
+ void Reset(Reference starting_after);
|
|
|
+
|
|
|
+ // Returns the previously retrieved reference, or kReferenceNull if none.
|
|
|
+@@ -201,17 +201,17 @@
|
|
|
+ // that value.
|
|
|
+ Reference GetLast();
|
|
|
+
|
|
|
+- // Gets the next iterable, storing that type in |type_return|. The actual
|
|
|
++ // Gets the next iterable, storing that type in `type_return`. The actual
|
|
|
+ // return value is a reference to the allocation inside the allocator or
|
|
|
+ // zero if there are no more. GetNext() may still be called again at a
|
|
|
+ // later time to retrieve any new allocations that have been added.
|
|
|
+- Reference GetNext(uint32_t* type_return);
|
|
|
++ Reference GetNext(uint32_t* type_return, size_t* alloc_size = nullptr);
|
|
|
+
|
|
|
+- // Similar to above but gets the next iterable of a specific |type_match|.
|
|
|
++ // Similar to above but gets the next iterable of a specific `type_match`.
|
|
|
+ // This should not be mixed with calls to GetNext() because any allocations
|
|
|
+ // skipped here due to a type mis-match will never be returned by later
|
|
|
+ // calls to GetNext() meaning it's possible to completely miss entries.
|
|
|
+- Reference GetNextOfType(uint32_t type_match);
|
|
|
++ Reference GetNextOfType(uint32_t type_match, size_t* alloc_size = nullptr);
|
|
|
+
|
|
|
+ // As above but works using object type.
|
|
|
+ template <typename T>
|
|
|
+@@ -244,8 +244,8 @@
|
|
|
+ }
|
|
|
+
|
|
|
+ // Convert a generic pointer back into a reference. A null reference will
|
|
|
+- // be returned if |memory| is not inside the persistent segment or does not
|
|
|
+- // point to an object of the specified |type_id|.
|
|
|
++ // be returned if `memory` is not inside the persistent segment or does not
|
|
|
++ // point to an object of the specified `type_id`.
|
|
|
+ Reference GetAsReference(const void* memory, uint32_t type_id) const {
|
|
|
+ return allocator_->GetAsReference(memory, type_id);
|
|
|
+ }
|
|
|
+@@ -308,12 +308,12 @@
|
|
|
+ // The allocator operates on any arbitrary block of memory. Creation and
|
|
|
+ // persisting or sharing of that block with another process is the
|
|
|
+ // responsibility of the caller. The allocator needs to know only the
|
|
|
+- // block's |base| address, the total |size| of the block, and any internal
|
|
|
+- // |page| size (zero if not paged) across which allocations should not span.
|
|
|
+- // The |id| is an arbitrary value the caller can use to identify a
|
|
|
++ // block's `base` address, the total `size` of the block, and any internal
|
|
|
++ // `page` size (zero if not paged) across which allocations should not span.
|
|
|
++ // The `id` is an arbitrary value the caller can use to identify a
|
|
|
+ // particular memory segment. It will only be loaded during the initial
|
|
|
+ // creation of the segment and can be checked by the caller for consistency.
|
|
|
+- // The |name|, if provided, is used to distinguish histograms for this
|
|
|
++ // The `name`, if provided, is used to distinguish histograms for this
|
|
|
+ // allocator. Only the primary owner of the segment should define this value;
|
|
|
+ // other processes can learn it from the shared state. If the access mode
|
|
|
+ // is kReadOnly then no changes will be made to it. The resulting object
|
|
|
+@@ -367,12 +367,12 @@
|
|
|
+ uint8_t GetMemoryState() const;
|
|
|
+
|
|
|
+ // Create internal histograms for tracking memory use and allocation sizes
|
|
|
+- // for allocator of |name| (which can simply be the result of Name()). This
|
|
|
+- // is done seperately from construction for situations such as when the
|
|
|
++ // for allocator of `name` (which can simply be the result of Name()). This
|
|
|
++ // is done separately from construction for situations such as when the
|
|
|
+ // histograms will be backed by memory provided by this very allocator.
|
|
|
+ //
|
|
|
+ // IMPORTANT: tools/metrics/histograms/metadata/uma/histograms.xml must
|
|
|
+- // be updated with the following histograms for each |name| param:
|
|
|
++ // be updated with the following histograms for each `name` param:
|
|
|
+ // UMA.PersistentAllocator.name.Errors
|
|
|
+ // UMA.PersistentAllocator.name.UsedPct
|
|
|
+ void CreateTrackingHistograms(std::string_view name);
|
|
|
+@@ -382,13 +382,13 @@
|
|
|
+ // OS that all the data should be sent to the disk immediately. This is
|
|
|
+ // useful in the rare case where something has just been stored that needs
|
|
|
+ // to survive a hard shutdown of the machine like from a power failure.
|
|
|
+- // The |sync| parameter indicates if this call should block until the flush
|
|
|
++ // The `sync` parameter indicates if this call should block until the flush
|
|
|
+ // is complete but is only advisory and may or may not have an effect
|
|
|
+ // depending on the capabilities of the OS. Synchronous flushes are allowed
|
|
|
+- // only from threads that are allowed to do I/O but since |sync| is only
|
|
|
++ // only from threads that are allowed to do I/O but since `sync` is only
|
|
|
+ // advisory, all flushes should be done on IO-capable threads.
|
|
|
+- // TODO: Since |sync| is ignored on Windows, consider making it re-post on a
|
|
|
+- // background thread with |sync| set to true so that |sync| is not just
|
|
|
++ // TODO: Since `sync` is ignored on Windows, consider making it re-post on a
|
|
|
++ // background thread with `sync` set to true so that `sync` is not just
|
|
|
+ // advisory.
|
|
|
+ void Flush(bool sync);
|
|
|
+
|
|
|
+@@ -400,9 +400,9 @@
|
|
|
+ size_t size() const { return mem_size_; }
|
|
|
+ size_t used() const;
|
|
|
+
|
|
|
+- // Get an object referenced by a |ref|. For safety reasons, the |type_id|
|
|
|
+- // code and size-of(|T|) are compared to ensure the reference is valid
|
|
|
+- // and cannot return an object outside of the memory segment. A |type_id| of
|
|
|
++ // Get an object referenced by a `ref`. For safety reasons, the `type_id`
|
|
|
++ // code and size-of(`T`) are compared to ensure the reference is valid
|
|
|
++ // and cannot return an object outside of the memory segment. A `type_id` of
|
|
|
+ // kTypeIdAny (zero) will match any though the size is still checked. NULL is
|
|
|
+ // returned if any problem is detected, such as corrupted storage or incorrect
|
|
|
+ // parameters. Callers MUST check that the returned value is not-null EVERY
|
|
|
+@@ -422,7 +422,7 @@
|
|
|
+ // largest architecture, including at the end.
|
|
|
+ //
|
|
|
+ // To protected against mistakes, all objects must have the attribute
|
|
|
+- // |kExpectedInstanceSize| (static constexpr size_t) that is a hard-coded
|
|
|
++ // `kExpectedInstanceSize` (static constexpr size_t) that is a hard-coded
|
|
|
+ // numerical value -- NNN, not sizeof(T) -- that can be tested. If the
|
|
|
+ // instance size is not fixed, at least one build will fail.
|
|
|
+ //
|
|
|
+@@ -442,27 +442,28 @@
|
|
|
+ // nature of that keyword to the caller. It can add it back, if necessary,
|
|
|
+ // based on knowledge of how the allocator is being used.
|
|
|
+ template <typename T>
|
|
|
+- T* GetAsObject(Reference ref) {
|
|
|
++ T* GetAsObject(Reference ref, size_t* alloc_size = nullptr) {
|
|
|
+ static_assert(std::is_standard_layout_v<T>, "only standard objects");
|
|
|
+ static_assert(!std::is_array_v<T>, "use GetAsArray<>()");
|
|
|
+ static_assert(T::kExpectedInstanceSize == sizeof(T), "inconsistent size");
|
|
|
+ return const_cast<T*>(reinterpret_cast<volatile T*>(
|
|
|
+- GetBlockData(ref, T::kPersistentTypeId, sizeof(T))));
|
|
|
++ GetBlockData(ref, T::kPersistentTypeId, sizeof(T), alloc_size)));
|
|
|
+ }
|
|
|
+ template <typename T>
|
|
|
+- const T* GetAsObject(Reference ref) const {
|
|
|
++ const T* GetAsObject(Reference ref, size_t* alloc_size = nullptr) const {
|
|
|
+ static_assert(std::is_standard_layout_v<T>, "only standard objects");
|
|
|
+ static_assert(!std::is_array_v<T>, "use GetAsArray<>()");
|
|
|
+ static_assert(T::kExpectedInstanceSize == sizeof(T), "inconsistent size");
|
|
|
+ return const_cast<const T*>(reinterpret_cast<const volatile T*>(
|
|
|
+- GetBlockData(ref, T::kPersistentTypeId, sizeof(T))));
|
|
|
++ GetBlockData(ref, T::kPersistentTypeId, sizeof(T), alloc_size)));
|
|
|
+ }
|
|
|
+
|
|
|
+- // Like GetAsObject but get an array of simple, fixed-size types.
|
|
|
++ // Like GetAsObject() but get an array of simple, fixed-size types.
|
|
|
+ //
|
|
|
+- // Use a |count| of the required number of array elements, or kSizeAny.
|
|
|
+- // GetAllocSize() can be used to calculate the upper bound but isn't reliable
|
|
|
+- // because padding can make space for extra elements that were not written.
|
|
|
++ // Use a `count` of the required number of array elements, or kSizeAny.
|
|
|
++  // The optionally returned `alloc_size` can be used to calculate the upper
|
|
|
++ // bound but isn't reliable because padding can make space for extra elements
|
|
|
++ // that were not written.
|
|
|
+ //
|
|
|
+ // Remember that an array of char is a string but may not be NUL terminated.
|
|
|
+ //
|
|
|
+@@ -470,29 +471,29 @@
|
|
|
+ // compatibilty when using these accessors. Only use fixed-size types such
|
|
|
+ // as char, float, double, or (u)intXX_t.
|
|
|
+ template <typename T>
|
|
|
+- T* GetAsArray(Reference ref, uint32_t type_id, size_t count) {
|
|
|
++ T* GetAsArray(Reference ref,
|
|
|
++ uint32_t type_id,
|
|
|
++ size_t count,
|
|
|
++ size_t* alloc_size = nullptr) {
|
|
|
+ static_assert(std::is_fundamental_v<T>, "use GetAsObject<>()");
|
|
|
+ return const_cast<T*>(reinterpret_cast<volatile T*>(
|
|
|
+- GetBlockData(ref, type_id, count * sizeof(T))));
|
|
|
++ GetBlockData(ref, type_id, count * sizeof(T), alloc_size)));
|
|
|
+ }
|
|
|
+ template <typename T>
|
|
|
+- const T* GetAsArray(Reference ref, uint32_t type_id, size_t count) const {
|
|
|
++ const T* GetAsArray(Reference ref,
|
|
|
++ uint32_t type_id,
|
|
|
++ size_t count,
|
|
|
++ size_t* alloc_size = nullptr) const {
|
|
|
+ static_assert(std::is_fundamental_v<T>, "use GetAsObject<>()");
|
|
|
+ return const_cast<const char*>(reinterpret_cast<const volatile T*>(
|
|
|
+- GetBlockData(ref, type_id, count * sizeof(T))));
|
|
|
++ GetBlockData(ref, type_id, count * sizeof(T), alloc_size)));
|
|
|
+ }
|
|
|
+
|
|
|
+ // Get the corresponding reference for an object held in persistent memory.
|
|
|
+- // If the |memory| is not valid or the type does not match, a kReferenceNull
|
|
|
++ // If the `memory` is not valid or the type does not match, a kReferenceNull
|
|
|
+ // result will be returned.
|
|
|
+ Reference GetAsReference(const void* memory, uint32_t type_id) const;
|
|
|
+
|
|
|
+- // Get the number of bytes allocated to a block. This is useful when storing
|
|
|
+- // arrays in order to validate the ending boundary. The returned value will
|
|
|
+- // include any padding added to achieve the required alignment and so could
|
|
|
+- // be larger than given in the original Allocate() request.
|
|
|
+- size_t GetAllocSize(Reference ref) const;
|
|
|
+-
|
|
|
+ // Access the internal "type" of an object. This generally isn't necessary
|
|
|
+ // but can be used to "clear" the type and so effectively mark it as deleted
|
|
|
+ // even though the memory stays valid and allocated. Changing the type is
|
|
|
+@@ -500,8 +501,8 @@
|
|
|
+ // It will return false if the existing type is not what is expected.
|
|
|
+ //
|
|
|
+ // Changing the type doesn't mean the data is compatible with the new type.
|
|
|
+- // Passing true for |clear| will zero the memory after the type has been
|
|
|
+- // changed away from |from_type_id| but before it becomes |to_type_id| meaning
|
|
|
++ // Passing true for `clear` will zero the memory after the type has been
|
|
|
++ // changed away from `from_type_id` but before it becomes `to_type_id` meaning
|
|
|
+ // that it is done in a manner that is thread-safe. Memory is guaranteed to
|
|
|
+ // be zeroed atomically by machine-word in a monotonically increasing order.
|
|
|
+ //
|
|
|
+@@ -553,13 +554,15 @@
|
|
|
+ // While the above works much like malloc & free, these next methods provide
|
|
|
+ // an "object" interface similar to new and delete.
|
|
|
+
|
|
|
+- // Reserve space in the memory segment of the desired |size| and |type_id|.
|
|
|
++ // Reserve space in the memory segment of the desired `size` and `type_id`.
|
|
|
+ //
|
|
|
+ // A return value of zero indicates the allocation failed, otherwise the
|
|
|
+ // returned reference can be used by any process to get a real pointer via
|
|
|
+- // the GetAsObject() or GetAsArray calls. The actual allocated size may be
|
|
|
++ // the GetAsObject() or GetAsArray() calls. The actual allocated size may be
|
|
|
+ // larger and will always be a multiple of 8 bytes (64 bits).
|
|
|
+- Reference Allocate(size_t size, uint32_t type_id);
|
|
|
++ Reference Allocate(size_t size,
|
|
|
++ uint32_t type_id,
|
|
|
++ size_t* alloc_size = nullptr);
|
|
|
+
|
|
|
+ // Allocate and construct an object in persistent memory. The type must have
|
|
|
+ // both (size_t) kExpectedInstanceSize and (uint32_t) kPersistentTypeId
|
|
|
+@@ -586,7 +589,7 @@
+ }
+
+ // Similar to New, above, but construct the object out of an existing memory
+- // block and of an expected type. If |clear| is true, memory will be zeroed
++ // block and of an expected type. If `clear` is true, memory will be zeroed
+ // before construction. Though this is not standard object behavior, it
+ // is present to match with new allocations that always come from zeroed
+ // memory. Anything previously present simply ceases to exist; no destructor
+@@ -596,13 +599,16 @@
+ // results. USE WITH CARE!
+ template <typename T>
+ T* New(Reference ref, uint32_t from_type_id, bool clear) {
+- DCHECK_LE(sizeof(T), GetAllocSize(ref)) << "alloc not big enough for obj";
+ // Make sure the memory is appropriate. This won't be used until after
+ // the type is changed but checking first avoids the possibility of having
+ // to change the type back.
+- void* mem = const_cast<void*>(GetBlockData(ref, 0, sizeof(T)));
++ size_t alloc_size = 0;
++ void* mem = const_cast<void*>(GetBlockData(ref, 0, sizeof(T), &alloc_size));
+ if (!mem)
+ return nullptr;
++
++ DCHECK_LE(sizeof(T), alloc_size) << "alloc not big enough for obj";
++
+ // Ensure the allocator's internal alignment is sufficient for this object.
+ // This protects against coding errors in the allocator.
+ DCHECK_EQ(0U, reinterpret_cast<uintptr_t>(mem) & (alignof(T) - 1));
+@@ -633,7 +639,7 @@
+ // First change the type to "transitioning" so there is no race condition
+ // where another thread could find the object through iteration while it
+ // is being destructed. This will "acquire" the memory so no changes get
+- // reordered before it. It will fail if |ref| is invalid.
++ // reordered before it. It will fail if `ref` is invalid.
+ if (!ChangeType(ref, kTypeIdTransitioning, T::kPersistentTypeId, false))
+ return;
+ // Destruct the object.
+@@ -677,7 +683,7 @@
+ };
+
+ // Constructs the allocator. Everything is the same as the public allocator
+- // except |memory| which is a structure with additional information besides
++ // except `memory` which is a structure with additional information besides
+ // the base address.
+ PersistentMemoryAllocator(Memory memory,
+ size_t size,
+@@ -715,32 +721,52 @@
+ }
+
+ // Actual method for doing the allocation.
+- Reference AllocateImpl(size_t size, uint32_t type_id);
++ Reference AllocateImpl(size_t size, uint32_t type_id, size_t* alloc_size);
+
+- // Gets the block header associated with a specific reference.
++ // Dereferences a block `ref` to retrieve a pointer to the block header for
++ // the reference. This method ensures that the referenced block is valid for
++ // the desired `type_id` and `size`. Optionally, if `alloc_size` is not
++ // nullptr, the validated size of the underlying allocation is returned.
++ //
++ // Special cases for internal use only:
++ //
++ // * If `queue_ok` is true and `ref` is kReferenceQueue, then the
++ // block header for the allocation queue is returned.
++ //
++ // * If `free_ok` then the block header is allowed to point to a block that
++ // may not be in the `allocated` state. This bypasses block validation.
++ //
++ // Because they bypass block validation, it is not permitted to request the
++ // `alloc_size` when either of `queue_ok` or `free_ok` is true.
+ const volatile BlockHeader* GetBlock(Reference ref,
+ uint32_t type_id,
+ size_t size,
+ bool queue_ok,
+- bool free_ok) const;
++ bool free_ok,
++ size_t* alloc_size = nullptr) const;
+ volatile BlockHeader* GetBlock(Reference ref,
+ uint32_t type_id,
+ size_t size,
+ bool queue_ok,
+- bool free_ok) {
++ bool free_ok,
++ size_t* alloc_size = nullptr) {
+ return const_cast<volatile BlockHeader*>(
+ const_cast<const PersistentMemoryAllocator*>(this)->GetBlock(
+- ref, type_id, size, queue_ok, free_ok));
++ ref, type_id, size, queue_ok, free_ok, alloc_size));
+ }
+
+ // Gets the actual data within a block associated with a specific reference.
+ const volatile void* GetBlockData(Reference ref,
+ uint32_t type_id,
+- size_t size) const;
+- volatile void* GetBlockData(Reference ref, uint32_t type_id, size_t size) {
++ size_t size,
++ size_t* alloc_size = nullptr) const;
++ volatile void* GetBlockData(Reference ref,
++ uint32_t type_id,
++ size_t size,
++ size_t* alloc_size = nullptr) {
+ return const_cast<volatile void*>(
+ const_cast<const PersistentMemoryAllocator*>(this)->GetBlockData(
+- ref, type_id, size));
++ ref, type_id, size, alloc_size));
+ }
+
+ // Returns the offset to the first free space segment.
+@@ -785,12 +811,12 @@
+ ~LocalPersistentMemoryAllocator() override;
+
+ private:
+- // Allocates a block of local memory of the specified |size|, ensuring that
++ // Allocates a block of local memory of the specified `size`, ensuring that
+ // the memory will not be physically allocated until accessed and will read
+ // as zero when that happens.
+ static Memory AllocateLocalMemory(size_t size, std::string_view name);
+
+- // Deallocates a block of local |memory| of the specified |size|.
++ // Deallocates a block of local `memory` of the specified `size`.
+ static void DeallocateLocalMemory(void* memory, size_t size, MemoryType type);
+ };
+
+@@ -858,8 +884,8 @@
+ class BASE_EXPORT FilePersistentMemoryAllocator
+ : public PersistentMemoryAllocator {
+ public:
+- // A |max_size| of zero will use the length of the file as the maximum
+- // size. The |file| object must have been already created with sufficient
++ // A `max_size` of zero will use the length of the file as the maximum
++ // size. The `file` object must have been already created with sufficient
+ // permissions (read, read/write, or read/write/extend).
+ FilePersistentMemoryAllocator(std::unique_ptr<MemoryMappedFile> file,
+ size_t max_size,
+@@ -909,18 +935,18 @@
+ public:
+ using Reference = PersistentMemoryAllocator::Reference;
+
+- // Creates a delayed allocation using the specified |allocator|. When
+- // needed, the memory will be allocated using the specified |type| and
+- // |size|. If |offset| is given, the returned pointer will be at that
++ // Creates a delayed allocation using the specified `allocator`. When
++ // needed, the memory will be allocated using the specified `type` and
++ // `size`. If `offset` is given, the returned pointer will be at that
+ // offset into the segment; this allows combining allocations into a
+ // single persistent segment to reduce overhead and means an "all or
+- // nothing" request. Note that |size| is always the total memory size
+- // and |offset| is just indicating the start of a block within it.
++ // nothing" request. Note that `size` is always the total memory size
++ // and `offset` is just indicating the start of a block within it.
+ //
+- // Once allocated, a reference to the segment will be stored at |ref|.
++ // Once allocated, a reference to the segment will be stored at `ref`.
+ // This shared location must be initialized to zero (0); it is checked
+ // with every Get() request to see if the allocation has already been
+- // done. If reading |ref| outside of this object, be sure to do an
++ // done. If reading `ref` outside of this object, be sure to do an
+ // "acquire" load. Don't write to it -- leave that to this object.
+ DelayedPersistentAllocation(PersistentMemoryAllocator* allocator,
+ std::atomic<Reference>* ref,
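Illustrative caller code for the revised public API (a sketch only, not part of the patch: the LocalPersistentMemoryAllocator constructor arguments and the 0x1234 type id below are assumed for the example). It shows how the validated allocation size now comes back from Allocate() and GetAsArray() directly, with no separate GetAllocSize() round trip to race against:

    #include "base/metrics/persistent_memory_allocator.h"

    void AllocSizeExample() {
      // Assumed constructor shape: (size, id, name).
      base::LocalPersistentMemoryAllocator allocator(64 << 10, /*id=*/0,
                                                     "example");

      // The validated allocation size is returned by Allocate() itself, so
      // callers no longer need a follow-up GetAllocSize() lookup.
      size_t alloc_size = 0;
      base::PersistentMemoryAllocator::Reference ref = allocator.Allocate(
          100 * sizeof(uint32_t), /*type_id=*/0x1234, &alloc_size);
      if (!ref)
        return;  // Allocator is full.

      // GetAsArray() likewise reports the validated size of the block it
      // returns; `fetched` covers at least the requested bytes plus padding.
      size_t fetched = 0;
      const uint32_t* values = allocator.GetAsArray<uint32_t>(
          ref, 0x1234, base::PersistentMemoryAllocator::kSizeAny, &fetched);
      (void)values;
      (void)fetched;
    }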
+diff --git a/base/metrics/persistent_memory_allocator_unittest.cc b/base/metrics/persistent_memory_allocator_unittest.cc
+index 01ac173..a0b96616 100644
+--- a/base/metrics/persistent_memory_allocator_unittest.cc
++++ b/base/metrics/persistent_memory_allocator_unittest.cc
+@@ -141,11 +141,12 @@
+ ASSERT_TRUE(obj1);
+ Reference block1 = allocator_->GetAsReference(obj1);
+ ASSERT_NE(0U, block1);
+- EXPECT_NE(nullptr, allocator_->GetAsObject<TestObject1>(block1));
+ EXPECT_EQ(nullptr, allocator_->GetAsObject<TestObject2>(block1));
+- EXPECT_LE(sizeof(TestObject1), allocator_->GetAllocSize(block1));
+- EXPECT_GT(sizeof(TestObject1) + kAllocAlignment,
+- allocator_->GetAllocSize(block1));
++ size_t alloc_size_1 = 0;
++ EXPECT_NE(nullptr,
++ allocator_->GetAsObject<TestObject1>(block1, &alloc_size_1));
++ EXPECT_LE(sizeof(TestObject1), alloc_size_1);
++ EXPECT_GT(sizeof(TestObject1) + kAllocAlignment, alloc_size_1);
+ PersistentMemoryAllocator::MemoryInfo meminfo1;
+ allocator_->GetMemoryInfo(&meminfo1);
+ EXPECT_EQ(meminfo0.total, meminfo1.total);
+@@ -181,11 +182,12 @@
+ ASSERT_TRUE(obj2);
+ Reference block2 = allocator_->GetAsReference(obj2);
+ ASSERT_NE(0U, block2);
+- EXPECT_NE(nullptr, allocator_->GetAsObject<TestObject2>(block2));
+ EXPECT_EQ(nullptr, allocator_->GetAsObject<TestObject1>(block2));
+- EXPECT_LE(sizeof(TestObject2), allocator_->GetAllocSize(block2));
+- EXPECT_GT(sizeof(TestObject2) + kAllocAlignment,
+- allocator_->GetAllocSize(block2));
++ size_t alloc_size_2 = 0;
++ EXPECT_NE(nullptr,
++ allocator_->GetAsObject<TestObject2>(block2, &alloc_size_2));
++ EXPECT_LE(sizeof(TestObject2), alloc_size_2);
++ EXPECT_GT(sizeof(TestObject2) + kAllocAlignment, alloc_size_2);
+ PersistentMemoryAllocator::MemoryInfo meminfo2;
+ allocator_->GetMemoryInfo(&meminfo2);
+ EXPECT_EQ(meminfo1.total, meminfo2.total);
+@@ -966,10 +968,10 @@
+ uint32_t type_id;
+ Reference ref;
+ while ((ref = iter.GetNext(&type_id)) != 0) {
++ size_t size = 0;
+ const char* data = allocator.GetAsArray<char>(
+- ref, 0, PersistentMemoryAllocator::kSizeAny);
++ ref, 0, PersistentMemoryAllocator::kSizeAny, &size);
+ uint32_t type = allocator.GetType(ref);
+- size_t size = allocator.GetAllocSize(ref);
+ // Ensure compiler can't optimize-out above variables.
+ (void)data;
+ (void)type;
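Outside the test fixture, the same iteration idiom looks roughly like this (a sketch assuming a PersistentMemoryAllocator instance named `allocator` is in scope); the data pointer and its validated size come from the single GetAsArray() call:

    base::PersistentMemoryAllocator::Iterator iter(&allocator);
    base::PersistentMemoryAllocator::Reference ref;
    uint32_t type_id;
    while ((ref = iter.GetNext(&type_id)) != 0) {
      size_t size = 0;
      const char* data = allocator.GetAsArray<char>(
          ref, 0, base::PersistentMemoryAllocator::kSizeAny, &size);
      if (!data)
        continue;
      // `data` spans `size` bytes, as validated by the same call that
      // produced the pointer.
    }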
+diff --git a/components/metrics/persistent_system_profile.cc b/components/metrics/persistent_system_profile.cc
+index 5f53ff2..3cef1f2 100644
+--- a/components/metrics/persistent_system_profile.cc
++++ b/components/metrics/persistent_system_profile.cc
+@@ -109,7 +109,7 @@
+ if (!AddSegment(remaining_size))
+ return false;
+ }
+- // Write out as much of the data as possible. |data| and |remaining_size|
++ // Write out as much of the data as possible. `data` and `remaining_size`
+ // are updated in place.
+ if (!WriteData(type, &data, &remaining_size))
+ return false;
+@@ -152,8 +152,7 @@
+
+ bool PersistentSystemProfile::RecordAllocator::NextSegment() const {
+ base::PersistentMemoryAllocator::Iterator iter(allocator_, alloc_reference_);
+- alloc_reference_ = iter.GetNextOfType(kTypeIdSystemProfile);
+- alloc_size_ = allocator_->GetAllocSize(alloc_reference_);
++ alloc_reference_ = iter.GetNextOfType(kTypeIdSystemProfile, &alloc_size_);
+ end_offset_ = 0;
+ return alloc_reference_ != 0;
+ }
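For comparison, the lookup half of this change in isolation (a sketch assuming the `allocator_` member and the kTypeIdSystemProfile constant from this file): GetNextOfType() now hands back the validated allocation size together with the reference, so the two cannot drift apart between calls:

    size_t alloc_size = 0;
    base::PersistentMemoryAllocator::Iterator iter(allocator_);
    uint32_t ref = iter.GetNextOfType(kTypeIdSystemProfile, &alloc_size);
    if (ref) {
      // `alloc_size` describes the block referenced by `ref`.
    }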
+@@ -174,13 +173,15 @@
+ size_t size =
+ std::max(CalculateRecordSize(min_size), kSystemProfileAllocSize);
+
+- uint32_t ref = allocator_->Allocate(size, kTypeIdSystemProfile);
++ size_t new_alloc_size = 0;
++ uint32_t ref =
++ allocator_->Allocate(size, kTypeIdSystemProfile, &new_alloc_size);
+ if (!ref)
+ return false; // Allocator must be full.
+ allocator_->MakeIterable(ref);
+
+ alloc_reference_ = ref;
+- alloc_size_ = allocator_->GetAllocSize(ref);
++ alloc_size_ = new_alloc_size;
+ return true;
+ }
+
+@@ -289,7 +290,7 @@
+ base::PersistentMemoryAllocator* memory_allocator) {
+ DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
+
+- // Create and store the allocator. A |min_size| of "1" ensures that a memory
++ // Create and store the allocator. A `min_size` of "1" ensures that a memory
+ // block is reserved now.
+ RecordAllocator allocator(memory_allocator, 1);
+ allocators_.push_back(std::move(allocator));