continue rewrite of lp_data/glop

This commit is contained in:
Laurent Perron
2019-07-18 11:35:32 -07:00
parent c629f9d3b4
commit 0cd49e115a
15 changed files with 189 additions and 145 deletions

View File

@@ -75,21 +75,21 @@ void DualEdgeNorms::UpdateBeforeBasisPivot(
// Update the norm.
int stat_lower_bounded_norms = 0;
for (const RowIndex row : direction.non_zeros) {
for (const auto e : direction) {
// Note that the update formula used is important to maximize the precision.
// See Koberstein's PhD section 8.2.2.1.
edge_squared_norms_[row] +=
direction[row] *
(direction[row] * new_leaving_squared_norm - 2.0 / pivot * tau[row]);
edge_squared_norms_[e.row()] +=
e.coefficient() * (e.coefficient() * new_leaving_squared_norm -
2.0 / pivot * tau[e.row()]);
// Avoid 0.0 norms (The 1e-4 is the value used by Koberstein).
// TODO(user): use a more precise lower bound depending on the column norm?
// We can do that with Cauchy-Swartz inequality:
// (edge . leaving_column)^2 = 1.0 < ||edge||^2 * ||leaving_column||^2
const Fractional kLowerBound = 1e-4;
if (edge_squared_norms_[row] < kLowerBound) {
if (row == leaving_row) continue;
edge_squared_norms_[row] = kLowerBound;
if (edge_squared_norms_[e.row()] < kLowerBound) {
if (e.row() == leaving_row) continue;
edge_squared_norms_[e.row()] = kLowerBound;
++stat_lower_bounded_norms;
}
}

View File

@@ -190,19 +190,9 @@ void LuFactorization::RightSolveLWithPermutedInput(const DenseColumn& a,
}
}
void LuFactorization::RightSolveLForColumnView(const ColumnView& b,
ScatteredColumn* x) const {
SCOPED_TIME_STAT(&stats_);
DCHECK(IsAllZero(x->values));
x->non_zeros.clear();
if (is_identity_factorization_) {
for (const ColumnView::Entry e : b) {
(*x)[e.row()] = e.coefficient();
x->non_zeros.push_back(e.row());
}
return;
}
template <typename Column>
void LuFactorization::RightSolveLInternal(const Column& b,
ScatteredColumn* x) const {
// This code is equivalent to
// b.PermutedCopyToDenseVector(row_perm_, num_rows, x);
// but it also computes the first column index which does not correspond to an
@@ -210,7 +200,7 @@ void LuFactorization::RightSolveLForColumnView(const ColumnView& b,
// of b.
ColIndex first_column_to_consider(RowToColIndex(x->values.size()));
const ColIndex limit = lower_.GetFirstNonIdentityColumn();
for (const ColumnView::Entry e : b) {
for (const auto e : b) {
const RowIndex permuted_row = row_perm_[e.row()];
(*x)[permuted_row] = e.coefficient();
x->non_zeros.push_back(permuted_row);
@@ -234,6 +224,22 @@ void LuFactorization::RightSolveLForColumnView(const ColumnView& b,
}
}
// Solves L.x = b for x, where b is given as a ColumnView. x must be all-zero
// on entry (DCHECK'd below); its non_zeros list is rebuilt from scratch.
void LuFactorization::RightSolveLForColumnView(const ColumnView& b,
ScatteredColumn* x) const {
SCOPED_TIME_STAT(&stats_);
DCHECK(IsAllZero(x->values));
x->non_zeros.clear();
// Fast path: with an identity factorization the solution is b itself, so we
// just scatter b's entries into x and record their positions.
if (is_identity_factorization_) {
for (const ColumnView::Entry e : b) {
(*x)[e.row()] = e.coefficient();
x->non_zeros.push_back(e.row());
}
return;
}
// Generic path, shared (via the Column template parameter) with
// RightSolveLForScatteredColumn().
RightSolveLInternal(b, x);
}
void LuFactorization::RightSolveLWithNonZeros(ScatteredColumn* x) const {
if (is_identity_factorization_) return;
if (x->non_zeros.empty()) {
@@ -253,9 +259,6 @@ void LuFactorization::RightSolveLWithNonZeros(ScatteredColumn* x) const {
}
}
// TODO(user): This code is almost the same a RightSolveLForSparseColumn()
// except that the API to iterate on the input is different. Find a way to
// deduplicate the code.
void LuFactorization::RightSolveLForScatteredColumn(const ScatteredColumn& b,
ScatteredColumn* x) const {
SCOPED_TIME_STAT(&stats_);
@@ -272,34 +275,7 @@ void LuFactorization::RightSolveLForScatteredColumn(const ScatteredColumn& b,
return RightSolveLWithNonZeros(x);
}
// This code is equivalent to
// b.PermutedCopyToDenseVector(row_perm_, num_rows, x);
// but it also computes the first column index which does not correspond to an
// identity column of lower_ thus exploiting a bit the hyper-sparsity of b.
ColIndex first_column_to_consider(RowToColIndex(x->values.size()));
const ColIndex limit = lower_.GetFirstNonIdentityColumn();
for (const RowIndex row : b.non_zeros) {
const RowIndex permuted_row = row_perm_[row];
(*x)[permuted_row] = b[row];
x->non_zeros.push_back(permuted_row);
// The second condition only works because the elements on the diagonal of
// lower_ are all equal to 1.0.
const ColIndex col = RowToColIndex(permuted_row);
if (col < limit || lower_.ColumnIsDiagonalOnly(col)) {
DCHECK_EQ(1.0, lower_.GetDiagonalCoefficient(col));
continue;
}
first_column_to_consider = std::min(first_column_to_consider, col);
}
lower_.ComputeRowsToConsiderInSortedOrder(&x->non_zeros);
x->non_zeros_are_sorted = true;
if (x->non_zeros.empty()) {
lower_.LowerSolveStartingAt(first_column_to_consider, &x->values);
} else {
lower_.HyperSparseSolve(&x->values, &x->non_zeros);
}
RightSolveLInternal(b, x);
}
void LuFactorization::LeftSolveUWithNonZeros(ScatteredRow* y) const {

View File

@@ -17,6 +17,7 @@
#include "ortools/glop/markowitz.h"
#include "ortools/glop/parameters.pb.h"
#include "ortools/glop/status.h"
#include "ortools/lp_data/lp_types.h"
#include "ortools/lp_data/sparse.h"
#include "ortools/util/stats.h"
@@ -221,6 +222,10 @@ class LuFactorization {
// Internal function used in the left solve functions.
void LeftSolveScratchpad() const;
// Internal function used in the right solve functions
template <typename Column>
void RightSolveLInternal(const Column& b, ScatteredColumn* x) const;
// Fills transpose_upper_ from upper_.
void ComputeTransposeUpper();

View File

@@ -152,8 +152,8 @@ void PrimalEdgeNorms::ComputeDirectionLeftInverse(
(direction_left_inverse_.non_zeros.size() + direction.non_zeros.size() <
2 * kThreshold)) {
ClearAndResizeVectorWithNonZeros(size, &direction_left_inverse_);
for (const RowIndex row : direction.non_zeros) {
direction_left_inverse_[RowToColIndex(row)] = direction[row];
for (const auto e : direction) {
direction_left_inverse_[RowToColIndex(e.row())] = e.coefficient();
}
} else {
direction_left_inverse_.values = Transpose(direction.values);

View File

@@ -159,7 +159,7 @@ class RankOneUpdateFactorization {
}
// Same as LeftSolve(), but if the given non_zeros are not empty, then all
// the new non-zeros in the result are happended to it.
// the new non-zeros in the result are appended to it.
void LeftSolveWithNonZeros(ScatteredRow* y) const {
RETURN_IF_NULL(y);
if (y->non_zeros.empty()) {
@@ -167,7 +167,7 @@ class RankOneUpdateFactorization {
return;
}
// tmp_row_is_non_zero_ is always all false before and after this code.
// y->is_non_zero is always all false before and after this code.
y->is_non_zero.resize(y->values.size(), false);
DCHECK(IsAllFalse(y->is_non_zero));
for (const ColIndex col : y->non_zeros) y->is_non_zero[col] = true;

View File

@@ -1424,9 +1424,9 @@ void RevisedSimplex::ComputeDirection(ColIndex col) {
}
}
} else {
for (const RowIndex row : direction_.non_zeros) {
for (const auto e : direction_) {
direction_infinity_norm_ =
std::max(direction_infinity_norm_, std::abs(direction_[row]));
std::max(direction_infinity_norm_, std::abs(e.coefficient()));
}
}
IF_STATS_ENABLED(ratio_test_stats_.direction_density.Add(
@@ -1438,8 +1438,8 @@ void RevisedSimplex::ComputeDirection(ColIndex col) {
Fractional RevisedSimplex::ComputeDirectionError(ColIndex col) {
SCOPED_TIME_STAT(&function_stats_);
compact_matrix_.ColumnCopyToDenseColumn(col, &error_);
for (const RowIndex row : direction_.non_zeros) {
compact_matrix_.ColumnAddMultipleToDenseColumn(col, -direction_[row],
for (const auto e : direction_) {
compact_matrix_.ColumnAddMultipleToDenseColumn(col, -e.coefficient(),
&error_);
}
return InfinityNorm(error_);
@@ -1477,7 +1477,7 @@ Fractional RevisedSimplex::ComputeHarrisRatioAndLeavingCandidates(
const Fractional minimum_delta = parameters_.degenerate_ministep_factor() *
parameters_.primal_feasibility_tolerance();
// Initialy, we can skip any variable with a ratio greater than
// Initially, we can skip any variable with a ratio greater than
// bound_flip_ratio since it seems to be always better to choose the
// bound-flip over such leaving variable.
Fractional harris_ratio = bound_flip_ratio;
@@ -1491,19 +1491,19 @@ Fractional RevisedSimplex::ComputeHarrisRatioAndLeavingCandidates(
? parameters_.minimum_acceptable_pivot()
: parameters_.ratio_test_zero_threshold();
for (const RowIndex row : direction_.non_zeros) {
const Fractional magnitude = std::abs(direction_[row]);
for (const auto e : direction_) {
const Fractional magnitude = std::abs(e.coefficient());
if (magnitude <= threshold) continue;
Fractional ratio = GetRatio<is_entering_reduced_cost_positive>(row);
Fractional ratio = GetRatio<is_entering_reduced_cost_positive>(e.row());
// TODO(user): The perturbation is currently disabled, so no need to test
// anything here.
if (false && ratio < 0.0) {
// If the variable is already pass its bound, we use the perturbed version
// of the bound (if bound_perturbation_[basis_[row]] is not zero).
ratio += std::abs(bound_perturbation_[basis_[row]] / direction_[row]);
ratio += std::abs(bound_perturbation_[basis_[e.row()]] / e.coefficient());
}
if (ratio <= harris_ratio) {
leaving_candidates->SetCoefficient(row, ratio);
leaving_candidates->SetCoefficient(e.row(), ratio);
// The second max() makes sure harris_ratio is lower bounded by a small
// positive value. The more classical approach is to bound it by 0.0 but
@@ -1773,9 +1773,9 @@ void RevisedSimplex::PrimalPhaseIChooseLeavingVariableRow(
std::vector<BreakPoint> breakpoints;
const Fractional tolerance = parameters_.primal_feasibility_tolerance();
for (RowIndex row : direction_.non_zeros) {
for (const auto e : direction_) {
const Fractional direction =
reduced_cost > 0.0 ? direction_[row] : -direction_[row];
reduced_cost > 0.0 ? e.coefficient() : -e.coefficient();
const Fractional magnitude = std::abs(direction);
if (magnitude < tolerance) continue;
@@ -1792,7 +1792,7 @@ void RevisedSimplex::PrimalPhaseIChooseLeavingVariableRow(
// account all the variables with an infeasibility smaller than the
// tolerance, and here we will at least improve the one of the leaving
// variable.
const ColIndex col = basis_[row];
const ColIndex col = basis_[e.row()];
DCHECK(variables_info_.GetIsBasicBitRow().IsSet(col));
const Fractional value = variable_values_.Get(col);
@@ -1804,10 +1804,12 @@ void RevisedSimplex::PrimalPhaseIChooseLeavingVariableRow(
// Enqueue the possible transitions. Note that the second tests exclude the
// case where to_lower or to_upper are infinite.
if (to_lower >= 0.0 && to_lower < current_ratio) {
breakpoints.push_back(BreakPoint(row, to_lower, magnitude, lower_bound));
breakpoints.push_back(
BreakPoint(e.row(), to_lower, magnitude, lower_bound));
}
if (to_upper >= 0.0 && to_upper < current_ratio) {
breakpoints.push_back(BreakPoint(row, to_upper, magnitude, upper_bound));
breakpoints.push_back(
BreakPoint(e.row(), to_upper, magnitude, upper_bound));
}
}
@@ -1940,12 +1942,12 @@ void RevisedSimplex::DualPhaseIUpdatePrice(RowIndex leaving_row,
// direction).
const Fractional step =
dual_pricing_vector_[leaving_row] / direction_[leaving_row];
for (const RowIndex row : direction_.non_zeros) {
dual_pricing_vector_[row] -= direction_[row] * step;
for (const auto e : direction_) {
dual_pricing_vector_[e.row()] -= e.coefficient() * step;
is_dual_entering_candidate_.Set(
row,
IsDualPhaseILeavingCandidate(dual_pricing_vector_[row],
variable_type[basis_[row]], threshold));
e.row(), IsDualPhaseILeavingCandidate(dual_pricing_vector_[e.row()],
variable_type[basis_[e.row()]],
threshold));
}
dual_pricing_vector_[leaving_row] = step;
@@ -2027,13 +2029,13 @@ void RevisedSimplex::DualPhaseIUpdatePriceOnReducedCostChange(
}
initially_all_zero_scratchpad_.values.AssignToZero(num_rows_);
} else {
for (const RowIndex row : initially_all_zero_scratchpad_.non_zeros) {
dual_pricing_vector_[row] += initially_all_zero_scratchpad_[row];
initially_all_zero_scratchpad_[row] = 0.0;
for (const auto e : initially_all_zero_scratchpad_) {
dual_pricing_vector_[e.row()] += e.coefficient();
initially_all_zero_scratchpad_[e.row()] = 0.0;
is_dual_entering_candidate_.Set(
row, IsDualPhaseILeavingCandidate(dual_pricing_vector_[row],
variable_type[basis_[row]],
threshold));
e.row(), IsDualPhaseILeavingCandidate(
dual_pricing_vector_[e.row()],
variable_type[basis_[e.row()]], threshold));
}
}
initially_all_zero_scratchpad_.non_zeros.clear();
@@ -3010,18 +3012,18 @@ gtl::ITIVector<RowIndex, SparseRow> RevisedSimplex::ComputeDictionary(
gtl::ITIVector<RowIndex, SparseRow> dictionary(num_rows_.value());
for (ColIndex col(0); col < num_cols_; ++col) {
ComputeDirection(col);
for (const RowIndex row : direction_.non_zeros) {
for (const auto e : direction_) {
if (column_scales == nullptr) {
dictionary[row].SetCoefficient(col, direction_[row]);
dictionary[e.row()].SetCoefficient(col, e.coefficient());
continue;
}
const Fractional numerator =
col < column_scales->size() ? (*column_scales)[col] : 1.0;
const Fractional denominator = GetBasis(row) < column_scales->size()
? (*column_scales)[GetBasis(row)]
const Fractional denominator = GetBasis(e.row()) < column_scales->size()
? (*column_scales)[GetBasis(e.row())]
: 1.0;
dictionary[row].SetCoefficient(
col, direction_[row] * (numerator / denominator));
dictionary[e.row()].SetCoefficient(
col, direction_[e.row()] * (numerator / denominator));
}
}
return dictionary;

View File

@@ -107,10 +107,11 @@ void UpdateRow::ComputeUpdateRow(RowIndex leaving_row) {
}
}
} else {
for (const ColIndex col : unit_row_left_inverse_.non_zeros) {
if (std::abs(unit_row_left_inverse_.values[col]) > drop_tolerance) {
unit_row_left_inverse_filtered_non_zeros_.push_back(col);
num_row_wise_entries += transposed_matrix_.ColumnNumEntries(col);
for (const auto e : unit_row_left_inverse_) {
if (std::abs(e.coefficient()) > drop_tolerance) {
unit_row_left_inverse_filtered_non_zeros_.push_back(e.column());
num_row_wise_entries +=
transposed_matrix_.ColumnNumEntries(e.column());
}
}
}

View File

@@ -101,7 +101,7 @@ class UpdateRow {
// unit_row_left_inverse_.
void ComputeUnitRowLeftInverse(RowIndex leaving_row);
// ComputeUpdateRow() does the common work and call one of these functions
// ComputeUpdateRow() does the common work and calls one of these functions
// depending on the situation.
void ComputeUpdatesRowWise();
void ComputeUpdatesRowWiseHypersparse();

View File

@@ -157,9 +157,9 @@ void VariableValues::UpdateOnPivoting(const ScatteredColumn& direction,
// Note that there is no need to call variables_info_.Update() on basic
// variables when they change values. Note also that the status of
// entering_col will be updated later.
for (const RowIndex row : direction.non_zeros) {
const ColIndex col = basis_[row];
variable_values_[col] -= direction[row] * step;
for (const auto e : direction) {
const ColIndex col = basis_[e.row()];
variable_values_[col] -= e.coefficient() * step;
}
variable_values_[entering_col] += step;
}
@@ -199,9 +199,9 @@ void VariableValues::UpdateGivenNonBasicVariables(
initially_all_zero_scratchpad_.values.AssignToZero(num_rows);
ResetPrimalInfeasibilityInformation();
} else {
for (const RowIndex row : initially_all_zero_scratchpad_.non_zeros) {
variable_values_[basis_[row]] -= initially_all_zero_scratchpad_[row];
initially_all_zero_scratchpad_[row] = 0.0;
for (const auto e : initially_all_zero_scratchpad_) {
variable_values_[basis_[e.row()]] -= e.coefficient();
initially_all_zero_scratchpad_[e.row()] = 0.0;
}
UpdatePrimalInfeasibilityInformation(
initially_all_zero_scratchpad_.non_zeros);

View File

@@ -22,6 +22,7 @@
#include "ortools/base/basictypes.h"
#include "ortools/base/int_type.h"
#include "ortools/base/int_type_indexed_vector.h"
#include "ortools/base/logging.h"
#include "ortools/util/bitset.h"
// We use typedefs as much as possible to later permit the usage of
@@ -359,13 +360,67 @@ bool ShouldUseDenseIteration(const ScatteredRowOrCol& v) {
static_cast<double>(v.values.size().value());
}
// A (index, coefficient) view of one entry of a ScatteredVector, parameterized
// by the entry's index type (RowIndex or ColIndex). The entry reads its index
// from the vector's non_zeros array and its coefficient from the *dense*
// values array: note in coefficient() that the dense array is addressed by
// index_[i_], not by i_ itself (contrast with SparseVectorEntry, whose
// indices and coefficients are stored in parallel arrays).
template <typename IndexType>
class ScatteredVectorEntry {
public:
using Index = IndexType;
// Index (row or column) of the i_-th recorded non-zero.
Index index() const { return index_[i_.value()]; }
// Coefficient of this entry, fetched from the dense values array.
Fractional coefficient() const {
return coefficient_[index_[i_.value()].value()];
}
protected:
ScatteredVectorEntry(const Index* indices, const Fractional* coefficients,
EntryIndex i)
: i_(i), index_(indices), coefficient_(coefficients) {}
// Position of this entry in the non_zeros (indices) array.
EntryIndex i_;
// Pointer to the non_zeros indices array (not owned).
const Index* index_;
// Pointer to the dense values array, addressed by Index (not owned).
const Fractional* coefficient_;
};
// --------------------------------------------------------
// VectorIterator
// --------------------------------------------------------
// An iterator over the elements of a sparse data structure that stores the
// elements in arrays for indices and coefficients. The iterator is
// built as a wrapper over a sparse vector entry class; the concrete entry class
// is provided through the template argument EntryType and it must either be
// derived from ScatteredVectorEntry or it must provide the same public and
// protected interface.
// Iterator wrapper over an entry class (EntryType must derive from
// ScatteredVectorEntry or expose the same interface). The iterator privately
// *is* the entry: operator* simply returns a reference to itself, and
// operator++ advances the inherited entry position i_.
template <typename EntryType>
class VectorIterator : EntryType {
public:
using Index = typename EntryType::Index;
using Entry = EntryType;
VectorIterator(const Index* indices, const Fractional* coefficients,
EntryIndex i)
: EntryType(indices, coefficients, i) {}
void operator++() { ++this->i_; }
bool operator!=(const VectorIterator& other) const {
// This operator is intended for use in natural range iteration ONLY.
// Therefore, we prefer to use '<' so that a buggy range iteration which
// start point is *after* its end point stops immediately, instead of
// iterating 2^(number of bits of EntryIndex) times.
return this->i_ < other.i_;
}
// Dereferencing yields the iterator itself viewed as an Entry; the returned
// reference is only valid until the iterator is advanced.
const Entry& operator*() const { return *this; }
};
// A simple struct that contains a DenseVector and its non-zeros indices.
template <typename Index>
//
// TODO(user): Move ScatteredVector and related types to new file.
template <typename Index,
typename Iterator = VectorIterator<ScatteredVectorEntry<Index>>>
struct ScatteredVector {
StrictITIVector<Index, Fractional> values;
// This can be left empty in which case we just have the dense representation
// above. Otherwise, it should always be a subset of the actual non-zeros.
// above. Otherwise, it should always be a superset of the actual non-zeros.
bool non_zeros_are_sorted = false;
std::vector<Index> non_zeros;
@@ -377,6 +432,18 @@ struct ScatteredVector {
Fractional operator[](Index index) const { return values[index]; }
Fractional& operator[](Index index) { return values[index]; }
// The iterator syntax for (auto entry : v) where v is a ScatteredVector only
// works when non_zeros is populated (i.e., when the vector is treated as
// sparse).
Iterator begin() const {
DCHECK(!non_zeros.empty() || IsAllZero(values));
return Iterator(this->non_zeros.data(), this->values.data(), EntryIndex(0));
}
Iterator end() const {
return Iterator(this->non_zeros.data(), this->values.data(),
EntryIndex(non_zeros.size()));
}
// Sorting the non-zeros is not always needed, but it allows us to have
// exactly the same behavior while using a sparse iteration or a dense one. So
// we always do it after a Solve().
@@ -388,9 +455,35 @@ struct ScatteredVector {
}
};
// Specialization used in the code.
struct ScatteredColumn : public ScatteredVector<RowIndex> {};
struct ScatteredRow : public ScatteredVector<ColIndex> {};
// Specializations used in the code.
// Entry type for iterating over a ScatteredColumn: specializes
// ScatteredVectorEntry<RowIndex> and names the index a "row".
class ScatteredColumnEntry : public ScatteredVectorEntry<RowIndex> {
public:
// Returns the row of the current entry.
RowIndex row() const { return index(); }
protected:
// Constructible only by the iterator wrapper (see VectorIterator).
ScatteredColumnEntry(const RowIndex* indices, const Fractional* coefficients,
EntryIndex i)
: ScatteredVectorEntry<RowIndex>(indices, coefficients, i) {}
};
// Entry type for iterating over a ScatteredRow: specializes
// ScatteredVectorEntry<ColIndex> and names the index a "column".
class ScatteredRowEntry : public ScatteredVectorEntry<ColIndex> {
public:
// Returns the column of the current entry.
ColIndex column() const { return index(); }
protected:
// Constructible only by the iterator wrapper (see VectorIterator).
ScatteredRowEntry(const ColIndex* indices, const Fractional* coefficients,
EntryIndex i)
: ScatteredVectorEntry<ColIndex>(indices, coefficients, i) {}
};
using ScatteredColumnIterator = VectorIterator<ScatteredColumnEntry>;
using ScatteredRowIterator = VectorIterator<ScatteredRowEntry>;
// Concrete scattered-vector types used throughout glop: a column is indexed
// by RowIndex, a row by ColIndex, each with its matching entry iterator.
struct ScatteredColumn
: public ScatteredVector<RowIndex, ScatteredColumnIterator> {};
struct ScatteredRow : public ScatteredVector<ColIndex, ScatteredRowIterator> {};
inline const ScatteredRow& TransposedView(const ScatteredColumn& c) {
return reinterpret_cast<const ScatteredRow&>(c);

View File

@@ -117,8 +117,9 @@ Fractional PreciseScalarProduct(const DenseRowOrColumn& u,
return PreciseScalarProduct(u, v.values);
}
KahanSum sum;
for (const RowIndex row : v.non_zeros) {
sum.Add(u[typename DenseRowOrColumn::IndexType(row.value())] * v[row]);
for (const auto e : v) {
sum.Add(u[typename DenseRowOrColumn::IndexType(e.row().value())] *
e.coefficient());
}
return sum.Value();
}

View File

@@ -400,7 +400,7 @@ class CompactSparseMatrix {
// Same as ColumnAddMultipleToDenseColumn() but also adds the new non-zeros to
// the non_zeros vector. A non-zero is "new" if is_non_zero[row] was false,
// and we update dense_column[row]. This functions also updates is_non_zero.
// and we update dense_column[row]. This function also updates is_non_zero.
void ColumnAddMultipleToSparseScatteredColumn(ColIndex col,
Fractional multiplier,
ScatteredColumn* column) const {

View File

@@ -35,7 +35,7 @@ class SparseColumnEntry : public SparseVectorEntry<RowIndex> {
EntryIndex i)
: SparseVectorEntry<RowIndex>(indices, coefficients, i) {}
};
using SparseColumnIterator = SparseVectorIterator<SparseColumnEntry>;
using SparseColumnIterator = VectorIterator<SparseColumnEntry>;
class ColumnView;
@@ -70,7 +70,7 @@ class ColumnView {
// (see cl/51057736).
// Example: for(const Entry e : column_view)
typedef SparseColumnEntry Entry;
typedef SparseVectorIterator<Entry> Iterator;
typedef VectorIterator<Entry> Iterator;
ColumnView(EntryIndex num_entries, const RowIndex* rows,
const Fractional* const coefficients)

View File

@@ -32,7 +32,7 @@ class SparseRowEntry : public SparseVectorEntry<ColIndex> {
EntryIndex i)
: SparseVectorEntry<ColIndex>(indices, coefficients, i) {}
};
using SparseRowIterator = SparseVectorIterator<SparseRowEntry>;
using SparseRowIterator = VectorIterator<SparseRowEntry>;
// TODO(user): Use this class where appropriate, i.e. when a SparseColumn is
// used to store a row vector (by means of RowIndex to ColIndex casting).

View File

@@ -48,8 +48,6 @@ namespace glop {
template <typename IndexType>
class SparseVectorEntry;
template <typename EntryType>
class SparseVectorIterator;
// --------------------------------------------------------
// SparseVector
@@ -81,8 +79,7 @@ class SparseVectorIterator;
// TODO(user): un-expose this type to client; by getting rid of the
// index-based APIs and leveraging iterator-based APIs; if possible.
template <typename IndexType,
typename IteratorType =
SparseVectorIterator<SparseVectorEntry<IndexType>>>
typename IteratorType = VectorIterator<SparseVectorEntry<IndexType>>>
class SparseVector {
public:
typedef IndexType Index;
@@ -445,37 +442,6 @@ class SparseVectorEntry {
const Fractional* coefficient_;
};
// --------------------------------------------------------
// SparseVectorIterator
// --------------------------------------------------------
// An iterator over the elements of a sparse data structure that stores the
// elements in parallel arrays for indices and coefficients. The iterator is
// built as a wrapper over a sparse vector entry class; the concrete entry class
// is provided through the template argument EntryType and it must either be
// derived from SparseVectorEntry or it must provide the same public and
// protected interface.
template <typename EntryType>
class SparseVectorIterator : EntryType {
public:
using Index = typename EntryType::Index;
using Entry = EntryType;
SparseVectorIterator(const Index* indices, const Fractional* coefficients,
EntryIndex i)
: EntryType(indices, coefficients, i) {}
void operator++() { ++this->i_; }
bool operator!=(const SparseVectorIterator& other) const {
// This operator is intended for use in natural range iteration ONLY.
// Therefore, we prefer to use '<' so that a buggy range iteration which
// start point is *after* its end point stops immediately, instead of
// iterating 2^(number of bits of EntryIndex) times.
return this->i_ < other.i_;
}
const Entry& operator*() const { return *this; }
};
template <typename IndexType, typename IteratorType>
IteratorType SparseVector<IndexType, IteratorType>::begin() const {
return Iterator(this->index_, this->coefficient_, EntryIndex(0));