rearchitect glop code

This commit is contained in:
Laurent Perron
2018-02-12 11:35:51 +01:00
parent 17503ac111
commit 7aad40b1e3
21 changed files with 336 additions and 382 deletions

View File

@@ -27,20 +27,19 @@ namespace glop {
const Fractional EtaMatrix::kSparseThreshold = 0.5;
EtaMatrix::EtaMatrix(ColIndex eta_col,
const std::vector<RowIndex>& eta_non_zeros,
DenseColumn* dense_eta)
EtaMatrix::EtaMatrix(ColIndex eta_col, const ScatteredColumn& direction)
: eta_col_(eta_col),
eta_col_coefficient_((*dense_eta)[ColToRowIndex(eta_col)]),
eta_col_coefficient_(direction[ColToRowIndex(eta_col)]),
eta_coeff_(),
sparse_eta_coeff_() {
DCHECK_NE(0.0, eta_col_coefficient_);
eta_coeff_.swap(*dense_eta);
eta_coeff_ = direction.values;
eta_coeff_[ColToRowIndex(eta_col_)] = 0.0;
// Only fill sparse_eta_coeff_ if it is sparse enough.
if (eta_non_zeros.size() < kSparseThreshold * eta_coeff_.size().value()) {
for (const RowIndex row : eta_non_zeros) {
if (direction.non_zeros.size() <
kSparseThreshold * eta_coeff_.size().value()) {
for (const RowIndex row : direction.non_zeros) {
if (row == ColToRowIndex(eta_col)) continue;
sparse_eta_coeff_.SetCoefficient(row, eta_coeff_[row]);
}
@@ -144,11 +143,10 @@ void EtaFactorization::Clear() { STLDeleteElements(&eta_matrix_); }
void EtaFactorization::Update(ColIndex entering_col,
RowIndex leaving_variable_row,
const std::vector<RowIndex>& eta_non_zeros,
DenseColumn* dense_eta) {
const ScatteredColumn& direction) {
const ColIndex leaving_variable_col = RowToColIndex(leaving_variable_row);
EtaMatrix* const eta_factorization =
new EtaMatrix(leaving_variable_col, eta_non_zeros, dense_eta);
new EtaMatrix(leaving_variable_col, direction);
eta_matrix_.push_back(eta_factorization);
}
@@ -288,16 +286,14 @@ Status BasisFactorization::MiddleProductFormUpdate(
Status BasisFactorization::Update(ColIndex entering_col,
RowIndex leaving_variable_row,
const std::vector<RowIndex>& eta_non_zeros,
DenseColumn* dense_eta) {
const ScatteredColumn& direction) {
if (num_updates_ < max_num_updates_) {
SCOPED_TIME_STAT(&stats_);
if (use_middle_product_form_update_) {
GLOP_RETURN_IF_ERROR(
MiddleProductFormUpdate(entering_col, leaving_variable_row));
} else {
eta_factorization_.Update(entering_col, leaving_variable_row,
eta_non_zeros, dense_eta);
eta_factorization_.Update(entering_col, leaving_variable_row, direction);
}
++num_updates_;
tau_computation_can_be_optimized_ = false;
@@ -320,19 +316,18 @@ void BasisFactorization::LeftSolve(DenseRow* y) const {
}
}
void BasisFactorization::LeftSolveWithNonZeros(
DenseRow* y, ColIndexVector* non_zeros) const {
void BasisFactorization::LeftSolveWithNonZeros(ScatteredRow* y) const {
SCOPED_TIME_STAT(&stats_);
RETURN_IF_NULL(y);
BumpDeterministicTimeForSolve(matrix_.num_rows().value());
if (use_middle_product_form_update_) {
lu_factorization_.LeftSolveUWithNonZeros(y, non_zeros);
rank_one_factorization_.LeftSolveWithNonZeros(y, non_zeros);
lu_factorization_.LeftSolveLWithNonZeros(y, non_zeros, nullptr);
lu_factorization_.LeftSolveUWithNonZeros(y);
rank_one_factorization_.LeftSolveWithNonZeros(y);
lu_factorization_.LeftSolveLWithNonZeros(y, nullptr);
} else {
eta_factorization_.LeftSolve(y);
lu_factorization_.LeftSolve(y);
ComputeNonZeros(*y, non_zeros);
eta_factorization_.LeftSolve(&y->values);
lu_factorization_.LeftSolve(&y->values);
ComputeNonZeros(y->values, &y->non_zeros);
}
}
@@ -350,75 +345,72 @@ void BasisFactorization::RightSolve(DenseColumn* d) const {
}
}
void BasisFactorization::RightSolveWithNonZeros(
DenseColumn* d, std::vector<RowIndex>* non_zeros) const {
void BasisFactorization::RightSolveWithNonZeros(ScatteredColumn* d) const {
SCOPED_TIME_STAT(&stats_);
RETURN_IF_NULL(d);
BumpDeterministicTimeForSolve(non_zeros->size());
BumpDeterministicTimeForSolve(d->non_zeros.size());
if (use_middle_product_form_update_) {
lu_factorization_.RightSolveL(d);
rank_one_factorization_.RightSolve(d);
lu_factorization_.RightSolveL(&d->values);
rank_one_factorization_.RightSolve(&d->values);
// We need to clear non-zeros because at this point in the code, they don't
// correspond to the zeros of d. An empty non_zeros means that
// RightSolveWithNonZeros() will recompute them.
non_zeros->clear();
lu_factorization_.RightSolveUWithNonZeros(d, non_zeros);
d->non_zeros.clear();
lu_factorization_.RightSolveUWithNonZeros(d);
} else {
lu_factorization_.RightSolve(d);
eta_factorization_.RightSolve(d);
ComputeNonZeros(*d, non_zeros);
lu_factorization_.RightSolve(&d->values);
eta_factorization_.RightSolve(&d->values);
ComputeNonZeros(d->values, &d->non_zeros);
return;
}
}
DenseColumn* BasisFactorization::RightSolveForTau(ScatteredColumnReference a)
const {
DenseColumn* BasisFactorization::RightSolveForTau(
const ScatteredColumn& a) const {
SCOPED_TIME_STAT(&stats_);
BumpDeterministicTimeForSolve(matrix_.num_rows().value());
if (use_middle_product_form_update_) {
if (tau_computation_can_be_optimized_) {
// Once used, the intermediate result is overriden, so RightSolveForTau()
// Once used, the intermediate result is overridden, so RightSolveForTau()
// can no longer use the optimized algorithm.
tau_computation_can_be_optimized_ = false;
lu_factorization_.RightSolveLWithPermutedInput(a.dense_column, &tau_);
tau_non_zeros_.clear();
lu_factorization_.RightSolveLWithPermutedInput(a.values, &tau_.values);
tau_.non_zeros.clear();
} else {
if (tau_non_zeros_.empty()) {
tau_.assign(matrix_.num_rows(), 0);
// TODO(user): Extract function.
if (tau_.non_zeros.empty()) {
tau_.values.assign(matrix_.num_rows(), 0);
} else {
tau_.resize(matrix_.num_rows(), 0);
for (const RowIndex row : tau_non_zeros_) {
tau_.values.resize(matrix_.num_rows(), 0);
for (const RowIndex row : tau_.non_zeros) {
tau_[row] = 0.0;
}
}
lu_factorization_.RightSolveLForScatteredColumn(a, &tau_,
&tau_non_zeros_);
lu_factorization_.RightSolveLForScatteredColumn(a, &tau_);
}
rank_one_factorization_.RightSolveWithNonZeros(&tau_, &tau_non_zeros_);
lu_factorization_.RightSolveUWithNonZeros(&tau_, &tau_non_zeros_);
rank_one_factorization_.RightSolveWithNonZeros(&tau_);
lu_factorization_.RightSolveUWithNonZeros(&tau_);
} else {
tau_ = a.dense_column;
lu_factorization_.RightSolve(&tau_);
eta_factorization_.RightSolve(&tau_);
tau_.values = a.values;
lu_factorization_.RightSolve(&tau_.values);
eta_factorization_.RightSolve(&tau_.values);
}
tau_is_computed_ = true;
return &tau_;
return &tau_.values;
}
void BasisFactorization::LeftSolveForUnitRow(ColIndex j, DenseRow* y,
ColIndexVector* non_zeros) const {
void BasisFactorization::LeftSolveForUnitRow(ColIndex j,
ScatteredRow* y) const {
SCOPED_TIME_STAT(&stats_);
RETURN_IF_NULL(y);
BumpDeterministicTimeForSolve(1);
ClearAndResizeVectorWithNonZeros(RowToColIndex(matrix_.num_rows()), y,
non_zeros);
ClearAndResizeVectorWithNonZeros(RowToColIndex(matrix_.num_rows()), y);
if (!use_middle_product_form_update_) {
(*y)[j] = 1.0;
non_zeros->push_back(j);
eta_factorization_.SparseLeftSolve(y, non_zeros);
lu_factorization_.SparseLeftSolve(y, non_zeros);
y->non_zeros.push_back(j);
eta_factorization_.SparseLeftSolve(&y->values, &y->non_zeros);
lu_factorization_.SparseLeftSolve(&y->values, &y->non_zeros);
return;
}
@@ -427,72 +419,71 @@ void BasisFactorization::LeftSolveForUnitRow(ColIndex j, DenseRow* y,
// all positions in front of the unit will be zero (modulo the column
// permutation).
if (left_pool_mapping_[j] == kInvalidCol) {
const ColIndex start =
lu_factorization_.LeftSolveUForUnitRow(j, y, non_zeros);
if (non_zeros->empty()) {
left_pool_mapping_[j] =
storage_.AddDenseColumnPrefix(Transpose(*y), ColToRowIndex(start));
const ColIndex start = lu_factorization_.LeftSolveUForUnitRow(j, y);
if (y->non_zeros.empty()) {
left_pool_mapping_[j] = storage_.AddDenseColumnPrefix(
Transpose(y->values), ColToRowIndex(start));
} else {
left_pool_mapping_[j] = storage_.AddDenseColumnWithNonZeros(
Transpose(*y), *reinterpret_cast<RowIndexVector*>(non_zeros));
Transpose(y->values),
*reinterpret_cast<RowIndexVector*>(&y->non_zeros));
}
} else {
DenseColumn* const x = reinterpret_cast<DenseColumn*>(y);
RowIndexVector* const nz = reinterpret_cast<RowIndexVector*>(non_zeros);
RowIndexVector* const nz = reinterpret_cast<RowIndexVector*>(&y->non_zeros);
storage_.ColumnCopyToClearedDenseColumnWithNonZeros(left_pool_mapping_[j],
x, nz);
}
rank_one_factorization_.LeftSolveWithNonZeros(y, non_zeros);
rank_one_factorization_.LeftSolveWithNonZeros(y);
// We only keep the intermediate result needed for the optimized tau_
// computation if it was computed after the last time this was called.
if (tau_is_computed_) {
tau_is_computed_ = false;
tau_computation_can_be_optimized_ =
lu_factorization_.LeftSolveLWithNonZeros(y, non_zeros, &tau_);
tau_non_zeros_.clear();
lu_factorization_.LeftSolveLWithNonZeros(y, &tau_.values);
tau_.non_zeros.clear();
} else {
tau_computation_can_be_optimized_ = false;
lu_factorization_.LeftSolveLWithNonZeros(y, non_zeros, nullptr);
lu_factorization_.LeftSolveLWithNonZeros(y, nullptr);
}
}
void BasisFactorization::RightSolveForProblemColumn(
ColIndex col, DenseColumn* d, std::vector<RowIndex>* non_zeros) const {
void BasisFactorization::RightSolveForProblemColumn(ColIndex col,
ScatteredColumn* d) const {
SCOPED_TIME_STAT(&stats_);
RETURN_IF_NULL(d);
BumpDeterministicTimeForSolve(matrix_.column(col).num_entries().value());
if (!use_middle_product_form_update_) {
lu_factorization_.SparseRightSolve(matrix_.column(col), matrix_.num_rows(),
d);
eta_factorization_.RightSolve(d);
ComputeNonZeros(*d, non_zeros);
&d->values);
eta_factorization_.RightSolve(&d->values);
ComputeNonZeros(d->values, &d->non_zeros);
return;
}
// TODO(user): if right_pool_mapping_[col] != kInvalidCol, we can reuse it and
// just apply the last rank one update since it was computed.
ClearAndResizeVectorWithNonZeros(matrix_.num_rows(), d, non_zeros);
lu_factorization_.RightSolveLForSparseColumn(matrix_.column(col), d,
non_zeros);
rank_one_factorization_.RightSolveWithNonZeros(d, non_zeros);
ClearAndResizeVectorWithNonZeros(matrix_.num_rows(), d);
lu_factorization_.RightSolveLForSparseColumn(matrix_.column(col), d);
rank_one_factorization_.RightSolveWithNonZeros(d);
if (col >= right_pool_mapping_.size()) {
// This is needed because when we do an incremental solve with only new
// columns, we still reuse the current factorization without calling
// Refactorize() which would have resized this vector.
right_pool_mapping_.resize(col + 1, kInvalidCol);
}
if (non_zeros->empty()) {
right_pool_mapping_[col] = right_storage_.AddDenseColumn(*d);
if (d->non_zeros.empty()) {
right_pool_mapping_[col] = right_storage_.AddDenseColumn(d->values);
} else {
// The sort is needed if we want to have the same behavior for the sparse or
// hyper-sparse version.
std::sort(non_zeros->begin(), non_zeros->end());
std::sort(d->non_zeros.begin(), d->non_zeros.end());
right_pool_mapping_[col] =
right_storage_.AddDenseColumnWithNonZeros(*d, *non_zeros);
right_storage_.AddDenseColumnWithNonZeros(d->values, d->non_zeros);
}
lu_factorization_.RightSolveUWithNonZeros(d, non_zeros);
lu_factorization_.RightSolveUWithNonZeros(d);
}
Fractional BasisFactorization::RightSolveSquaredNorm(const SparseColumn& a)

View File

@@ -50,8 +50,7 @@ namespace glop {
// 0 ... 0 -e_{n-1}/e_j 0 ... 1 ]
class EtaMatrix {
public:
EtaMatrix(ColIndex eta_col, const std::vector<RowIndex>& eta_non_zeros,
DenseColumn* dense_eta);
EtaMatrix(ColIndex eta_col, const ScatteredColumn& direction);
virtual ~EtaMatrix();
// Solves the system y.E = c, 'c' being the initial value of 'y'.
@@ -117,8 +116,7 @@ class EtaFactorization {
// Updates the eta factorization, i.e. adds the new eta matrix defined by
// the leaving variable and the corresponding eta column.
void Update(ColIndex entering_col, RowIndex leaving_variable_row,
const std::vector<RowIndex>& eta_non_zeros,
DenseColumn* dense_eta);
const ScatteredColumn& direction);
// Left solves all systems from right to left, i.e. y_i = y_{i+1}.(E_i)^{-1}
void LeftSolve(DenseRow* y) const;
@@ -204,35 +202,31 @@ class BasisFactorization {
// avoid a copy (only if the standard eta update is used). Returns an error if
// the matrix could not be factorized: i.e. not a basis.
Status Update(ColIndex entering_col, RowIndex leaving_variable_row,
const std::vector<RowIndex>& eta_non_zeros,
DenseColumn* dense_eta) MUST_USE_RESULT;
const ScatteredColumn& direction) MUST_USE_RESULT;
// Left solves the system y.B = rhs, where y initially contains rhs.
// The second version also computes the non-zero positions of the result.
void LeftSolve(DenseRow* y) const;
void LeftSolveWithNonZeros(DenseRow* y, ColIndexVector* non_zeros) const;
void LeftSolveWithNonZeros(ScatteredRow* y) const;
// Left solves the system y.B = e_j, where e_j has only 1 non-zero
// coefficient of value 1.0 at position 'j'.
void LeftSolveForUnitRow(ColIndex j, DenseRow* y,
ColIndexVector* non_zeros) const;
void LeftSolveForUnitRow(ColIndex j, ScatteredRow* y) const;
// Same as RightSolve() for matrix.column(col).
// This also exploits its sparsity.
void RightSolveForProblemColumn(ColIndex col, DenseColumn* d,
std::vector<RowIndex>* non_zeros) const;
void RightSolveForProblemColumn(ColIndex col, ScatteredColumn* d) const;
// Right solves the system B.d = a where the input is the initial value of d.
void RightSolve(DenseColumn* d) const;
void RightSolveWithNonZeros(DenseColumn* d,
std::vector<RowIndex>* non_zeros) const;
void RightSolveWithNonZeros(ScatteredColumn* d) const;
// Specialized version for ComputeTau() in DualEdgeNorms. This reuses an
// intermediate result of the last LeftSolveForUnitRow() in order to save a
// permutation if it is available. Note that the input 'a' should always be
// equal to the last result of LeftSolveForUnitRow() and will be used for a
// DCHECK() or if the intermediate result wasn't kept.
DenseColumn* RightSolveForTau(ScatteredColumnReference a) const;
DenseColumn* RightSolveForTau(const ScatteredColumn& a) const;
// Returns the norm of B^{-1}.a, this is a specific function because
// it is a bit faster and it avoids polluting the stats of RightSolve().
@@ -322,8 +316,7 @@ class BasisFactorization {
// This is used by RightSolveForTau(). It holds an intermediate result from
// the last LeftSolveForUnitRow() and also the final result of
// RightSolveForTau().
mutable DenseColumn tau_;
mutable std::vector<RowIndex> tau_non_zeros_;
mutable ScatteredColumn tau_;
// Booleans controlling the interaction between LeftSolveForUnitRow() that may
// or may not keep its intermediate results for the optimized

View File

@@ -41,11 +41,11 @@ void DualEdgeNorms::UpdateDataOnBasisPermutation(
void DualEdgeNorms::UpdateBeforeBasisPivot(
ColIndex entering_col, RowIndex leaving_row,
ScatteredColumnReference direction,
ScatteredColumnReference unit_row_left_inverse) {
const ScatteredColumn& direction,
const ScatteredRow& unit_row_left_inverse) {
// No need to update if we will recompute it from scratch later.
if (recompute_edge_squared_norms_) return;
DenseColumn* tau = ComputeTau(unit_row_left_inverse);
DenseColumn* tau = ComputeTau(TransposedView(unit_row_left_inverse));
SCOPED_TIME_STAT(&stats_);
// ||unit_row_left_inverse||^2 is the same as
@@ -55,7 +55,7 @@ void DualEdgeNorms::UpdateBeforeBasisPivot(
// Note that we use the PreciseSquaredNorms() because it is a small price to
// pay for more precise update below.
const Fractional leaving_squared_norm =
PreciseSquaredNorm(unit_row_left_inverse);
PreciseSquaredNorm(TransposedView(unit_row_left_inverse));
const Fractional old_squared_norm = edge_squared_norms_[leaving_row];
const Fractional estimated_edge_norms_accuracy =
(sqrt(leaving_squared_norm) - sqrt(old_squared_norm)) /
@@ -75,7 +75,7 @@ void DualEdgeNorms::UpdateBeforeBasisPivot(
// Update the norm.
int stat_lower_bounded_norms = 0;
for (const RowIndex row : direction.non_zero_rows) {
for (const RowIndex row : direction.non_zeros) {
// Note that the update formula used is important to maximize the precision.
// See Koberstein's PhD section 8.2.2.1.
edge_squared_norms_[row] +=
@@ -112,7 +112,7 @@ void DualEdgeNorms::ComputeEdgeSquaredNorms() {
}
DenseColumn* DualEdgeNorms::ComputeTau(
ScatteredColumnReference unit_row_left_inverse) {
const ScatteredColumn& unit_row_left_inverse) {
SCOPED_TIME_STAT(&stats_);
DenseColumn* result =
basis_factorization_.RightSolveForTau(unit_row_left_inverse);

View File

@@ -72,8 +72,8 @@ class DualEdgeNorms {
// - unit_row_left_inverse is the left inverse of the unit row with index
// given by the leaving_row. This is also the leaving dual edge.
void UpdateBeforeBasisPivot(ColIndex entering_col, RowIndex leaving_row,
ScatteredColumnReference direction,
ScatteredColumnReference unit_row_left_inverse);
const ScatteredColumn& direction,
const ScatteredRow& unit_row_left_inverse);
// Sets the algorithm parameters.
void SetParameters(const GlopParameters& parameters) {
@@ -91,7 +91,7 @@ class DualEdgeNorms {
// Computes the vector tau needed to update the norms using a right solve:
// B.tau = (u_i)^T, u_i.B = e_i for i = leaving_row.
DenseColumn* ComputeTau(ScatteredColumnReference unit_row_left_inverse);
DenseColumn* ComputeTau(const ScatteredColumn& unit_row_left_inverse);
// Statistics.
struct Stats : public StatsGroup {

View File

@@ -102,11 +102,11 @@ Status EnteringVariable::DualChooseEnteringColumn(
const Fractional threshold = parameters_.ratio_test_zero_threshold();
const DenseBitRow& can_decrease = variables_info_.GetCanDecreaseBitRow();
const DenseBitRow& can_increase = variables_info_.GetCanIncreaseBitRow();
const DenseBitRow& is_boxed = variables_info_.GetNonBasicBoxedVariables();
// Harris ratio test. See below for more explanation. Here this is used to
// prune the first pass by not enqueueing ColWithRatio for columns that have
// a ratio greater than the current harris_ratio.
const VariableTypeRow& variable_type = variables_info_.GetTypeRow();
const Fractional harris_tolerance =
parameters_.harris_tolerance_ratio() *
reduced_costs_->GetDualFeasibilityTolerance();
@@ -122,7 +122,7 @@ Status EnteringVariable::DualChooseEnteringColumn(
// In this case, at some point the reduced cost will be positive if not
// already, and the column will be dual-infeasible.
if (can_decrease.IsSet(col) && coeff > threshold) {
if (variable_type[col] != VariableType::UPPER_AND_LOWER_BOUNDED) {
if (!is_boxed[col]) {
if (-reduced_costs[col] > harris_ratio * coeff) continue;
harris_ratio = std::min(
harris_ratio, (-reduced_costs[col] + harris_tolerance) / coeff);
@@ -135,7 +135,7 @@ Status EnteringVariable::DualChooseEnteringColumn(
// In this case, at some point the reduced cost will be negative if not
// already, and the column will be dual-infeasible.
if (can_increase.IsSet(col) && coeff < -threshold) {
if (variable_type[col] != VariableType::UPPER_AND_LOWER_BOUNDED) {
if (!is_boxed[col]) {
if (reduced_costs[col] > harris_ratio * -coeff) continue;
harris_ratio = std::min(
harris_ratio, (reduced_costs[col] + harris_tolerance) / -coeff);
@@ -185,7 +185,7 @@ Status EnteringVariable::DualChooseEnteringColumn(
// Note that the actual flipping will be done afterwards by
// MakeBoxedVariableDualFeasible() in revised_simplex.cc.
if (variation_magnitude > threshold) {
if (variable_type[top.col] == VariableType::UPPER_AND_LOWER_BOUNDED) {
if (is_boxed[top.col]) {
variation_magnitude -=
variables_info_.GetBoundDifference(top.col) * top.coeff_magnitude;
if (variation_magnitude > threshold) {

View File

@@ -74,9 +74,9 @@ void DumpLinearProgramIfRequiredByFlags(const LinearProgram& linear_program,
if (!FLAGS_lp_dump_to_proto_file) return;
#ifdef __PORTABLE_PLATFORM__
LOG(WARNING) << "DumpLinearProgramIfRequiredByFlags(linear_program, num) "
"requested for linear_program.name()= ",
linear_program.name(), ", num=", num,
" but is not implemented for this platform.";
"requested for linear_program.name()='"
<< linear_program.name() << "', num=" << num
<< " but is not implemented for this platform.";
#else
std::string filename = FLAGS_lp_dump_file_basename;
if (filename.empty()) {

View File

@@ -259,16 +259,15 @@ void LuFactorization::LeftSolveL(DenseRow* y) const {
ApplyInversePermutation(row_perm_, dense_column_scratchpad_, x);
}
void LuFactorization::RightSolveLForSparseColumn(
const SparseColumn& b, DenseColumn* x,
std::vector<RowIndex>* non_zeros) const {
void LuFactorization::RightSolveLForSparseColumn(const SparseColumn& b,
ScatteredColumn* x) const {
SCOPED_TIME_STAT(&stats_);
DCHECK(IsAllZero(*x));
non_zeros->clear();
DCHECK(IsAllZero(x->values));
x->non_zeros.clear();
if (is_identity_factorization_) {
for (const SparseColumn::Entry e : b) {
(*x)[e.row()] = e.coefficient();
non_zeros->push_back(e.row());
x->non_zeros.push_back(e.row());
}
return;
}
@@ -278,12 +277,12 @@ void LuFactorization::RightSolveLForSparseColumn(
// but it also computes the first column index which does not correspond to an
// identity column of lower_ thus exploiting a bit the hyper-sparsity
// of b.
ColIndex first_column_to_consider(RowToColIndex(x->size()));
ColIndex first_column_to_consider(RowToColIndex(x->values.size()));
const ColIndex limit = lower_.GetFirstNonIdentityColumn();
for (const SparseColumn::Entry e : b) {
const RowIndex permuted_row = row_perm_[e.row()];
(*x)[permuted_row] = e.coefficient();
non_zeros->push_back(permuted_row);
x->non_zeros.push_back(permuted_row);
// The second condition only works because the elements on the diagonal of
// lower_ are all equal to 1.0.
@@ -295,27 +294,26 @@ void LuFactorization::RightSolveLForSparseColumn(
first_column_to_consider = std::min(first_column_to_consider, col);
}
lower_.ComputeRowsToConsiderInSortedOrder(non_zeros);
if (non_zeros->empty()) {
lower_.LowerSolveStartingAt(first_column_to_consider, x);
lower_.ComputeRowsToConsiderInSortedOrder(&x->non_zeros);
if (x->non_zeros.empty()) {
lower_.LowerSolveStartingAt(first_column_to_consider, &x->values);
} else {
lower_.HyperSparseSolve(x, non_zeros);
lower_.HyperSparseSolve(&x->values, &x->non_zeros);
}
}
// TODO(user): This code is almost the same as RightSolveLForSparseColumn()
// except that the API to iterate on the input is different. Find a way to
// deduplicate the code.
void LuFactorization::RightSolveLForScatteredColumn(
const ScatteredColumnReference& b, DenseColumn* x,
std::vector<RowIndex>* non_zeros) const {
void LuFactorization::RightSolveLForScatteredColumn(const ScatteredColumn& b,
ScatteredColumn* x) const {
SCOPED_TIME_STAT(&stats_);
DCHECK(IsAllZero(*x));
non_zeros->clear();
DCHECK(IsAllZero(x->values));
x->non_zeros.clear();
if (is_identity_factorization_) {
for (const RowIndex row : b.non_zero_rows) {
(*x)[row] = b.dense_column[row];
non_zeros->push_back(row);
for (const RowIndex row : b.non_zeros) {
(*x)[row] = b[row];
x->non_zeros.push_back(row);
}
return;
}
@@ -324,12 +322,12 @@ void LuFactorization::RightSolveLForScatteredColumn(
// b.PermutedCopyToDenseVector(row_perm_, num_rows, x);
// but it also computes the first column index which does not correspond to an
// identity column of lower_ thus exploiting a bit the hyper-sparsity of b.
ColIndex first_column_to_consider(RowToColIndex(x->size()));
ColIndex first_column_to_consider(RowToColIndex(x->values.size()));
const ColIndex limit = lower_.GetFirstNonIdentityColumn();
for (const RowIndex row : b.non_zero_rows) {
for (const RowIndex row : b.non_zeros) {
const RowIndex permuted_row = row_perm_[row];
(*x)[permuted_row] = b.dense_column[row];
non_zeros->push_back(permuted_row);
(*x)[permuted_row] = b[row];
x->non_zeros.push_back(permuted_row);
// The second condition only works because the elements on the diagonal of
// lower_ are all equal to 1.0.
@@ -341,22 +339,21 @@ void LuFactorization::RightSolveLForScatteredColumn(
first_column_to_consider = std::min(first_column_to_consider, col);
}
lower_.ComputeRowsToConsiderInSortedOrder(non_zeros);
if (non_zeros->empty()) {
lower_.LowerSolveStartingAt(first_column_to_consider, x);
lower_.ComputeRowsToConsiderInSortedOrder(&x->non_zeros);
if (x->non_zeros.empty()) {
lower_.LowerSolveStartingAt(first_column_to_consider, &x->values);
} else {
lower_.HyperSparseSolve(x, non_zeros);
lower_.HyperSparseSolve(&x->values, &x->non_zeros);
}
}
void LuFactorization::LeftSolveUWithNonZeros(
DenseRow* y, std::vector<ColIndex>* non_zeros) const {
void LuFactorization::LeftSolveUWithNonZeros(ScatteredRow* y) const {
SCOPED_TIME_STAT(&stats_);
if (is_identity_factorization_) return;
DCHECK(col_perm_.empty());
DenseColumn* const x = DenseRowAsColumn(y);
RowIndexVector* const nz = reinterpret_cast<RowIndexVector*>(non_zeros);
DenseColumn* const x = DenseRowAsColumn(&y->values);
RowIndexVector* const nz = reinterpret_cast<RowIndexVector*>(&y->non_zeros);
transpose_upper_.ComputeRowsToConsiderInSortedOrder(nz);
if (nz->empty()) {
upper_.TransposeUpperSolve(x);
@@ -365,47 +362,45 @@ void LuFactorization::LeftSolveUWithNonZeros(
}
}
void LuFactorization::RightSolveUWithNonZeros(
DenseColumn* x, std::vector<RowIndex>* non_zeros) const {
void LuFactorization::RightSolveUWithNonZeros(ScatteredColumn* x) const {
SCOPED_TIME_STAT(&stats_);
if (is_identity_factorization_) {
if (non_zeros->empty()) ComputeNonZeros(*x, non_zeros);
if (x->non_zeros.empty()) ComputeNonZeros(x->values, &x->non_zeros);
return;
}
// If non-zeros is non-empty, we use an hypersparse solve. Note that if
// non_zeros starts to be too big, we clear it and thus switch back to a
// normal sparse solve.
upper_.ComputeRowsToConsiderInSortedOrder(non_zeros);
if (non_zeros->empty()) {
upper_.UpperSolveWithNonZeros(x, non_zeros);
upper_.ComputeRowsToConsiderInSortedOrder(&x->non_zeros);
if (x->non_zeros.empty()) {
upper_.UpperSolveWithNonZeros(&x->values, &x->non_zeros);
} else {
upper_.HyperSparseSolveWithReversedNonZeros(x, non_zeros);
upper_.HyperSparseSolveWithReversedNonZeros(&x->values, &x->non_zeros);
}
if (!col_perm_.empty()) {
// Note(user): This is only needed for the test because in Glop the
// col_perm_ is always "absorbed" by permuting the basis. So it is
// fine to do it in a non hyper-sparse way.
PermuteAndComputeNonZeros(inverse_col_perm_, &dense_zero_scratchpad_, x,
non_zeros);
PermuteAndComputeNonZeros(inverse_col_perm_, &dense_zero_scratchpad_,
&x->values, &x->non_zeros);
}
}
bool LuFactorization::LeftSolveLWithNonZeros(
DenseRow* y, ColIndexVector* non_zeros,
DenseColumn* result_before_permutation) const {
ScatteredRow* y, DenseColumn* result_before_permutation) const {
SCOPED_TIME_STAT(&stats_);
if (is_identity_factorization_) {
if (non_zeros->empty()) ComputeNonZeros(*y, non_zeros);
if (y->non_zeros.empty()) ComputeNonZeros(y->values, &y->non_zeros);
// It is not advantageous to fill result_before_permutation in this case.
return false;
}
DenseColumn* const x = DenseRowAsColumn(y);
std::vector<RowIndex>* nz = reinterpret_cast<RowIndexVector*>(non_zeros);
DenseColumn* const x = DenseRowAsColumn(&y->values);
std::vector<RowIndex>* nz = reinterpret_cast<RowIndexVector*>(&y->non_zeros);
// Hypersparse?
RowIndex last_non_zero_row = ColToRowIndex(y->size() - 1);
RowIndex last_non_zero_row = ColToRowIndex(y->values.size() - 1);
transpose_lower_.ComputeRowsToConsiderInSortedOrder(nz);
if (nz->empty()) {
lower_.TransposeLowerSolve(x, &last_non_zero_row);
@@ -437,32 +432,32 @@ bool LuFactorization::LeftSolveLWithNonZeros(
// use a different algorithm.
x->swap(*result_before_permutation);
x->AssignToZero(inverse_row_perm_.size());
non_zeros->clear();
y->non_zeros.clear();
for (RowIndex row(0); row <= last_non_zero_row; ++row) {
const Fractional value = (*result_before_permutation)[row];
if (value != 0.0) {
const RowIndex permuted_row = inverse_row_perm_[row];
(*x)[permuted_row] = value;
non_zeros->push_back(RowToColIndex(permuted_row));
y->non_zeros.push_back(RowToColIndex(permuted_row));
}
}
return true;
}
}
ColIndex LuFactorization::LeftSolveUForUnitRow(
ColIndex col, DenseRow* y, std::vector<ColIndex>* non_zeros) const {
ColIndex LuFactorization::LeftSolveUForUnitRow(ColIndex col,
ScatteredRow* y) const {
SCOPED_TIME_STAT(&stats_);
DCHECK(IsAllZero(*y));
DCHECK(non_zeros->empty());
DCHECK(IsAllZero(y->values));
DCHECK(y->non_zeros.empty());
if (is_identity_factorization_) {
(*y)[col] = 1.0;
non_zeros->push_back(col);
y->non_zeros.push_back(col);
return col;
}
const ColIndex permuted_col = col_perm_.empty() ? col : col_perm_[col];
(*y)[permuted_col] = 1.0;
non_zeros->push_back(permuted_col);
y->non_zeros.push_back(permuted_col);
// Using the transposed matrix here is faster (even accounting the time to
// construct it). Note the small optimization in case the inversion is
@@ -470,10 +465,10 @@ ColIndex LuFactorization::LeftSolveUForUnitRow(
if (transpose_upper_.ColumnIsDiagonalOnly(permuted_col)) {
(*y)[permuted_col] /= transpose_upper_.GetDiagonalCoefficient(permuted_col);
} else {
RowIndexVector* const nz = reinterpret_cast<RowIndexVector*>(non_zeros);
DenseColumn* const x = reinterpret_cast<DenseColumn*>(y);
RowIndexVector* const nz = reinterpret_cast<RowIndexVector*>(&y->non_zeros);
DenseColumn* const x = reinterpret_cast<DenseColumn*>(&y->values);
transpose_upper_.ComputeRowsToConsiderInSortedOrder(nz);
if (non_zeros->empty()) {
if (y->non_zeros.empty()) {
transpose_upper_.LowerSolveStartingAt(permuted_col, x);
} else {
transpose_upper_.HyperSparseSolve(x, nz);

View File

@@ -27,9 +27,6 @@ namespace glop {
// algorithms. The actual algorithm is in markowitz.h and .cc. This class holds
// all the Solve() functions that deal with the permutations and the L and U
// factors once they are computed.
//
// TODO(user): Add a ScatteredColumn class containing a DenseColumn and
// an EntryRowIndexVector non-zero pattern.
class LuFactorization {
public:
LuFactorization();
@@ -105,15 +102,14 @@ class LuFactorization {
void LeftSolveU(DenseRow* y) const;
void LeftSolveL(DenseRow* y) const;
// Specialized version of RightSolveL() that takes a SparseColumn (or a
// ScatteredColumnReference) as input. non_zeros will either be cleared or set
// to the non zeros of the result. Important: the output x must be of the
// correct size and all zero.
void RightSolveLForSparseColumn(const SparseColumn& b, DenseColumn* x,
std::vector<RowIndex>* non_zeros) const;
void RightSolveLForScatteredColumn(const ScatteredColumnReference& b,
DenseColumn* x,
std::vector<RowIndex>* non_zeros) const;
// Specialized version of RightSolveL() that takes a SparseColumn or a
// ScatteredColumn as input. non_zeros will either be cleared or set to the
// non zeros of the result. Important: the output x must be of the correct
// size and all zero.
void RightSolveLForSparseColumn(const SparseColumn& b,
ScatteredColumn* x) const;
void RightSolveLForScatteredColumn(const ScatteredColumn& b,
ScatteredColumn* x) const;
// Specialized version of RightSolveL() where x is originally equal to
// 'a' permuted by row_perm_. Note that 'a' is only used for DCHECK or when
@@ -126,23 +122,20 @@ class LuFactorization {
// It also returns the value of col permuted by Q (which is the position
// of the unit-vector rhs in the solve system: y.U = rhs).
// Important: the output y must be of the correct size and all zero.
ColIndex LeftSolveUForUnitRow(ColIndex col, DenseRow* y,
std::vector<ColIndex>* non_zeros) const;
ColIndex LeftSolveUForUnitRow(ColIndex col, ScatteredRow* y) const;
// Specialized version of RightSolveU() and LeftSolveU() that may exploit the
// initial non-zeros if it is non-empty. In addition,
// RightSolveUWithNonZeros() always return the non-zeros of the output.
void RightSolveUWithNonZeros(DenseColumn* x,
std::vector<RowIndex>* non_zeros) const;
void LeftSolveUWithNonZeros(DenseRow* y,
std::vector<ColIndex>* non_zeros) const;
void RightSolveUWithNonZeros(ScatteredColumn* x) const;
void LeftSolveUWithNonZeros(ScatteredRow* y) const;
// Specialized version of LeftSolveL() that also computes the non-zero
// pattern of the output. Moreover, if result_before_permutation is not NULL,
// it is filled with the result just before row_perm_ is applied to it and
// true is returned. If result_before_permutation is not filled, then false is
// returned.
bool LeftSolveLWithNonZeros(DenseRow* y, ColIndexVector* non_zeros,
bool LeftSolveLWithNonZeros(ScatteredRow* y,
DenseColumn* result_before_permutation) const;
// Returns the given column of U.

View File

@@ -76,6 +76,7 @@
#include <queue>
#include "ortools/base/logging.h"
#include "ortools/base/inlined_vector.h"
#include "ortools/glop/parameters.pb.h"
#include "ortools/glop/status.h"
#include "ortools/lp_data/lp_types.h"

View File

@@ -66,7 +66,7 @@ const DenseRow& PrimalEdgeNorms::GetMatrixColumnNorms() {
}
void PrimalEdgeNorms::TestEnteringEdgeNormPrecision(
ColIndex entering_col, ScatteredColumnReference direction) {
ColIndex entering_col, const ScatteredColumn& direction) {
if (!recompute_edge_squared_norms_) {
SCOPED_TIME_STAT(&stats_);
// Recompute the squared norm of the edge used during this
@@ -92,7 +92,7 @@ void PrimalEdgeNorms::TestEnteringEdgeNormPrecision(
void PrimalEdgeNorms::UpdateBeforeBasisPivot(ColIndex entering_col,
ColIndex leaving_col,
RowIndex leaving_row,
ScatteredColumnReference direction,
const ScatteredColumn& direction,
UpdateRow* update_row) {
SCOPED_TIME_STAT(&stats_);
DCHECK_NE(entering_col, leaving_col);
@@ -100,7 +100,7 @@ void PrimalEdgeNorms::UpdateBeforeBasisPivot(ColIndex entering_col,
update_row->ComputeUpdateRow(leaving_row);
ComputeDirectionLeftInverse(entering_col, direction);
UpdateEdgeSquaredNorms(entering_col, leaving_col, leaving_row,
direction.dense_column, *update_row);
direction.values, *update_row);
}
if (!reset_devex_weights_) {
// Resets devex weights once in a while. If so, no need to update them
@@ -112,7 +112,7 @@ void PrimalEdgeNorms::UpdateBeforeBasisPivot(ColIndex entering_col,
} else {
update_row->ComputeUpdateRow(leaving_row);
UpdateDevexWeights(entering_col, leaving_col, leaving_row,
direction.dense_column, *update_row);
direction.values, *update_row);
}
}
}
@@ -143,44 +143,42 @@ void PrimalEdgeNorms::ComputeEdgeSquaredNorms() {
}
void PrimalEdgeNorms::ComputeDirectionLeftInverse(
ColIndex entering_col, ScatteredColumnReference direction) {
ColIndex entering_col, const ScatteredColumn& direction) {
SCOPED_TIME_STAT(&stats_);
// Initialize direction_left_inverse_ to direction.dense_column .
// Initialize direction_left_inverse_ to direction.values.
// Note the special case when the non-zero vector is empty which means we
// don't know and need to use the dense version.
const ColIndex size = RowToColIndex(direction.dense_column.size());
const ColIndex size = RowToColIndex(direction.values.size());
const double kThreshold = 0.05 * size.value();
if (!direction_left_inverse_non_zeros_.empty() &&
(direction_left_inverse_non_zeros_.size() +
direction.non_zero_rows.size() <
if (!direction_left_inverse_.non_zeros.empty() &&
(direction_left_inverse_.non_zeros.size() + direction.non_zeros.size() <
2 * kThreshold)) {
ClearAndResizeVectorWithNonZeros(size, &direction_left_inverse_,
&direction_left_inverse_non_zeros_);
for (const RowIndex row : direction.non_zero_rows) {
direction_left_inverse_[RowToColIndex(row)] = direction.dense_column[row];
ClearAndResizeVectorWithNonZeros(size, &direction_left_inverse_);
for (const RowIndex row : direction.non_zeros) {
direction_left_inverse_[RowToColIndex(row)] = direction[row];
}
} else {
direction_left_inverse_ = Transpose(direction.dense_column);
direction_left_inverse_non_zeros_.clear();
direction_left_inverse_.values = Transpose(direction.values);
direction_left_inverse_.non_zeros.clear();
}
// Depending on the sparsity of the input, we decide which version to use.
if (direction.non_zero_rows.size() < kThreshold) {
direction_left_inverse_non_zeros_ =
*reinterpret_cast<ColIndexVector const*>(&direction.non_zero_rows);
basis_factorization_.LeftSolveWithNonZeros(
&direction_left_inverse_, &direction_left_inverse_non_zeros_);
if (direction.non_zeros.size() < kThreshold) {
direction_left_inverse_.non_zeros =
*reinterpret_cast<ColIndexVector const*>(&direction.non_zeros);
basis_factorization_.LeftSolveWithNonZeros(&direction_left_inverse_);
} else {
basis_factorization_.LeftSolve(&direction_left_inverse_);
basis_factorization_.LeftSolve(&direction_left_inverse_.values);
}
// TODO(user): Refactorize if estimated accuracy above a threshold.
IF_STATS_ENABLED(stats_.direction_left_inverse_accuracy.Add(
ScalarProduct(direction_left_inverse_, matrix_.column(entering_col)) -
SquaredNorm(direction.dense_column)));
ScalarProduct(direction_left_inverse_.values,
matrix_.column(entering_col)) -
SquaredNorm(direction.values)));
IF_STATS_ENABLED(stats_.direction_left_inverse_density.Add(
Density(direction_left_inverse_)));
Density(direction_left_inverse_.values)));
}
// Let new_edge denote the edge of 'col' in the new basis. We want:
@@ -219,8 +217,8 @@ void PrimalEdgeNorms::UpdateEdgeSquaredNorms(ColIndex entering_col,
if (num_omp_threads == 1) {
for (const ColIndex col : update_row.GetNonZeroPositions()) {
const Fractional coeff = update_row.GetCoefficient(col);
const Fractional scalar_product =
compact_matrix_.ColumnScalarProduct(col, direction_left_inverse_);
const Fractional scalar_product = compact_matrix_.ColumnScalarProduct(
col, direction_left_inverse_.values);
num_operations_ += compact_matrix_.column(col).num_entries().value();
// Update the edge squared norm of this column. Note that the update
@@ -256,8 +254,8 @@ void PrimalEdgeNorms::UpdateEdgeSquaredNorms(ColIndex entering_col,
for (int i = 0; i < parallel_loop_size; i++) {
const ColIndex col(relevant_rows[i]);
const Fractional coeff = update_row.GetCoefficient(col);
const Fractional scalar_product =
compact_matrix_.ColumnScalarProduct(col, direction_left_inverse_);
const Fractional scalar_product = compact_matrix_.ColumnScalarProduct(
col, direction_left_inverse_.values);
edge_squared_norms_[col] +=
coeff * (coeff * leaving_squared_norm + factor * scalar_product);
const Fractional lower_bound = 1.0 + Square(coeff / pivot);

View File

@@ -92,7 +92,7 @@ class PrimalEdgeNorms {
// GlopParameters). As a side effect, this replace the entering_col edge
// norm with its precise version.
void TestEnteringEdgeNormPrecision(ColIndex entering_col,
ScatteredColumnReference direction);
const ScatteredColumn& direction);
// Updates any internal data BEFORE the given simplex pivot is applied to B.
// Note that no updates are needed in case of a bound flip.
@@ -103,7 +103,7 @@ class PrimalEdgeNorms {
// - The update row (see UpdateRow), which will only be computed if needed.
void UpdateBeforeBasisPivot(ColIndex entering_col, ColIndex leaving_col,
RowIndex leaving_row,
ScatteredColumnReference direction,
const ScatteredColumn& direction,
UpdateRow* update_row);
// Sets the algorithm parameters.
@@ -145,7 +145,7 @@ class PrimalEdgeNorms {
// Compute the left inverse of the direction.
// The first argument is there for checking precision.
void ComputeDirectionLeftInverse(ColIndex entering_col,
ScatteredColumnReference direction);
const ScatteredColumn& direction);
// Updates edges_squared_norm_ according to the given pivot.
void UpdateEdgeSquaredNorms(ColIndex entering_col, ColIndex leaving_col,
@@ -195,8 +195,7 @@ class PrimalEdgeNorms {
// steepest edge paper. Its scalar product with a column 'a' of A gives the
// value of the scalar product of the 'direction' with the right inverse of
// 'a'.
DenseRow direction_left_inverse_;
ColIndexVector direction_left_inverse_non_zeros_;
ScatteredRow direction_left_inverse_;
// Used by DeterministicTime().
int64 num_operations_;

View File

@@ -63,15 +63,14 @@ class RankOneUpdateElementaryMatrix {
-storage_->ColumnScalarProduct(v_index_, Transpose(*x)) / mu_;
storage_->ColumnAddMultipleToDenseColumn(u_index_, multiplier, x);
}
void RightSolveWithNonZeros(DenseColumn* x,
StrictITIVector<RowIndex, bool>* is_non_zero,
std::vector<RowIndex>* non_zeros) const {
void RightSolveWithNonZeros(
ScatteredColumn* x, StrictITIVector<RowIndex, bool>* is_non_zero) const {
DCHECK(!IsSingular());
const Fractional multiplier =
-storage_->ColumnScalarProduct(v_index_, Transpose(*x)) / mu_;
-storage_->ColumnScalarProduct(v_index_, Transpose(x->values)) / mu_;
if (multiplier != 0.0) {
storage_->ColumnAddMultipleToDenseColumnAndUpdateNonZeros(
u_index_, multiplier, x, is_non_zero, non_zeros);
u_index_, multiplier, &x->values, is_non_zero, &x->non_zeros);
}
}
@@ -84,17 +83,16 @@ class RankOneUpdateElementaryMatrix {
storage_->ColumnAddMultipleToDenseColumn(v_index_, multiplier,
reinterpret_cast<DenseColumn*>(y));
}
void LeftSolveWithNonZeros(DenseRow* y,
StrictITIVector<ColIndex, bool>* is_non_zero,
std::vector<ColIndex>* non_zeros) const {
void LeftSolveWithNonZeros(
ScatteredRow* y, StrictITIVector<ColIndex, bool>* is_non_zero) const {
DCHECK(!IsSingular());
const Fractional multiplier =
-storage_->ColumnScalarProduct(u_index_, *y) / mu_;
-storage_->ColumnScalarProduct(u_index_, y->values) / mu_;
if (multiplier != 0.0) {
storage_->ColumnAddMultipleToDenseColumnAndUpdateNonZeros(
v_index_, multiplier, reinterpret_cast<DenseColumn*>(y),
v_index_, multiplier, reinterpret_cast<DenseColumn*>(&y->values),
reinterpret_cast<StrictITIVector<RowIndex, bool>*>(is_non_zero),
reinterpret_cast<std::vector<RowIndex>*>(non_zeros));
reinterpret_cast<std::vector<RowIndex>*>(&y->non_zeros));
}
}
@@ -166,31 +164,29 @@ class RankOneUpdateFactorization {
// Same as LeftSolve(), but if the given non_zeros are not empty, then all
// the new non-zeros in the result are appended to it.
void LeftSolveWithNonZeros(DenseRow* y,
std::vector<ColIndex>* non_zeros) const {
void LeftSolveWithNonZeros(ScatteredRow* y) const {
RETURN_IF_NULL(y);
if (non_zeros->empty()) {
LeftSolve(y);
if (y->non_zeros.empty()) {
LeftSolve(&y->values);
return;
}
// tmp_row_is_non_zero_ is always all false before and after this code.
tmp_col_is_non_zero_.resize(y->size(), false);
tmp_col_is_non_zero_.resize(y->values.size(), false);
DCHECK(std::all_of(tmp_col_is_non_zero_.begin(), tmp_col_is_non_zero_.end(),
[](bool v) { return !v; }));
for (const ColIndex col : *non_zeros) tmp_col_is_non_zero_[col] = true;
for (const ColIndex col : y->non_zeros) tmp_col_is_non_zero_[col] = true;
const int hypersparse_threshold = static_cast<int>(
hypersparse_ratio_ * static_cast<double>(y->size().value()));
hypersparse_ratio_ * static_cast<double>(y->values.size().value()));
for (int i = elementary_matrices_.size() - 1; i >= 0; --i) {
if (non_zeros->size() < hypersparse_threshold) {
elementary_matrices_[i].LeftSolveWithNonZeros(y, &tmp_col_is_non_zero_,
non_zeros);
if (y->non_zeros.size() < hypersparse_threshold) {
elementary_matrices_[i].LeftSolveWithNonZeros(y, &tmp_col_is_non_zero_);
} else {
elementary_matrices_[i].LeftSolve(y);
elementary_matrices_[i].LeftSolve(&y->values);
}
}
for (const ColIndex col : *non_zeros) tmp_col_is_non_zero_[col] = false;
if (non_zeros->size() >= hypersparse_threshold) non_zeros->clear();
for (const ColIndex col : y->non_zeros) tmp_col_is_non_zero_[col] = false;
if (y->non_zeros.size() >= hypersparse_threshold) y->non_zeros.clear();
}
// Right-solves all systems from left to right, i.e. T_i.d_{i+1} = d_i
@@ -204,32 +200,31 @@ class RankOneUpdateFactorization {
// Same as RightSolve(), but if the given non_zeros are not empty, then all
// the new non-zeros in the result are appended to it.
void RightSolveWithNonZeros(DenseColumn* d,
std::vector<RowIndex>* non_zeros) const {
void RightSolveWithNonZeros(ScatteredColumn* d) const {
RETURN_IF_NULL(d);
if (non_zeros->empty()) {
RightSolve(d);
if (d->non_zeros.empty()) {
RightSolve(&d->values);
return;
}
// tmp_row_is_non_zero_ is always all false before and after this code.
tmp_row_is_non_zero_.resize(d->size(), false);
tmp_row_is_non_zero_.resize(d->values.size(), false);
DCHECK(std::all_of(tmp_row_is_non_zero_.begin(), tmp_row_is_non_zero_.end(),
[](bool v) { return !v; }));
for (const RowIndex row : *non_zeros) tmp_row_is_non_zero_[row] = true;
for (const RowIndex row : d->non_zeros) tmp_row_is_non_zero_[row] = true;
const size_t end = elementary_matrices_.size();
const int hypersparse_threshold = static_cast<int>(
hypersparse_ratio_ * static_cast<double>(d->size().value()));
hypersparse_ratio_ * static_cast<double>(d->values.size().value()));
for (int i = 0; i < end; ++i) {
if (non_zeros->size() < hypersparse_threshold) {
elementary_matrices_[i].RightSolveWithNonZeros(d, &tmp_row_is_non_zero_,
non_zeros);
if (d->non_zeros.size() < hypersparse_threshold) {
elementary_matrices_[i].RightSolveWithNonZeros(d,
&tmp_row_is_non_zero_);
} else {
elementary_matrices_[i].RightSolve(d);
elementary_matrices_[i].RightSolve(&d->values);
}
}
for (const RowIndex row : *non_zeros) tmp_row_is_non_zero_[row] = false;
if (non_zeros->size() >= hypersparse_threshold) non_zeros->clear();
for (const RowIndex row : d->non_zeros) tmp_row_is_non_zero_[row] = false;
if (d->non_zeros.size() >= hypersparse_threshold) d->non_zeros.clear();
}
EntryIndex num_entries() const { return num_entries_; }

View File

@@ -56,7 +56,7 @@ bool ReducedCosts::NeedsBasisRefactorization() const {
}
bool ReducedCosts::TestEnteringReducedCostPrecision(
ColIndex entering_col, ScatteredColumnReference direction,
ColIndex entering_col, const ScatteredColumn& direction,
Fractional* reduced_cost) {
SCOPED_TIME_STAT(&stats_);
if (recompute_basic_objective_) {
@@ -171,7 +171,7 @@ Fractional ReducedCosts::ComputeSumOfDualInfeasibilities() const {
void ReducedCosts::UpdateBeforeBasisPivot(ColIndex entering_col,
RowIndex leaving_row,
const DenseColumn& direction,
const ScatteredColumn& direction,
UpdateRow* update_row) {
SCOPED_TIME_STAT(&stats_);
const ColIndex leaving_col = basis_[leaving_row];
@@ -485,12 +485,12 @@ void ReducedCosts::UpdateReducedCosts(ColIndex entering_col,
// Always update the slack variable position so we have the dual values and
// we can use them in ComputeCurrentDualResidualError().
ScatteredColumnReference unit_row_left_inverse =
const ScatteredRow& unit_row_left_inverse =
update_row->GetUnitRowLeftInverse();
for (const RowIndex row : unit_row_left_inverse.non_zero_rows) {
const ColIndex col = first_slack_col + RowToColIndex(row);
const Fractional coeff = unit_row_left_inverse.dense_column[row];
reduced_costs_[col] += new_leaving_reduced_cost * coeff;
for (const ColIndex col : unit_row_left_inverse.non_zeros) {
const ColIndex slack_col = first_slack_col + col;
const Fractional coeff = unit_row_left_inverse[col];
reduced_costs_[slack_col] += new_leaving_reduced_cost * coeff;
}
reduced_costs_[leaving_col] = new_leaving_reduced_cost;

View File

@@ -62,7 +62,7 @@ class ReducedCosts {
// or false if this column is actually not good and ChooseEnteringColumn()
// need to be called again.
bool TestEnteringReducedCostPrecision(ColIndex entering_col,
ScatteredColumnReference direction,
const ScatteredColumn& direction,
Fractional* reduced_cost);
// Computes the current dual residual and infeasibility. Note that these
@@ -80,7 +80,7 @@ class ReducedCosts {
// - The index in B of the leaving basic variable.
// - The 'direction', i.e. the right inverse of the entering column.
void UpdateBeforeBasisPivot(ColIndex entering_col, RowIndex leaving_row,
const DenseColumn& direction,
const ScatteredColumn& direction,
UpdateRow* update_row);
// Once a pivot has been done, this need to be called on the column that just

View File

@@ -784,48 +784,52 @@ bool RevisedSimplex::InitializeBoundsAndTestIfUnchanged(
bound_perturbation_.assign(num_cols_, 0.0);
// Variable bounds, for both non-slack and slack variables.
DCHECK_EQ(lp.num_variables(), num_cols_);
bool bounds_are_unchanged = true;
DCHECK_EQ(lp.num_variables(), num_cols_);
for (ColIndex col(0); col < lp.num_variables(); ++col) {
if (lower_bound_[col] != lp.variable_lower_bounds()[col] ||
upper_bound_[col] != lp.variable_upper_bounds()[col]) {
bounds_are_unchanged = false;
break;
}
lower_bound_[col] = lp.variable_lower_bounds()[col];
upper_bound_[col] = lp.variable_upper_bounds()[col];
}
if (!bounds_are_unchanged) {
lower_bound_ = lp.variable_lower_bounds();
upper_bound_ = lp.variable_upper_bounds();
}
return bounds_are_unchanged;
}
bool RevisedSimplex::InitializeObjectiveAndTestIfUnchanged(
const LinearProgram& lp) {
DCHECK_EQ(num_cols_, lp.num_variables());
SCOPED_TIME_STAT(&function_stats_);
// Note that we use the minimization version of the objective.
bool objective_is_unchanged = true;
objective_.resize(num_cols_, 0.0);
for (ColIndex col(0); col < lp.num_variables(); ++col) {
if (objective_[col] !=
lp.GetObjectiveCoefficientForMinimizationVersion(col)) {
objective_is_unchanged = false;
}
objective_[col] = lp.GetObjectiveCoefficientForMinimizationVersion(col);
}
for (ColIndex col(lp.num_variables()); col < num_cols_; ++col) {
if (objective_[col] != 0.0) {
objective_is_unchanged = false;
}
objective_[col] = 0.0;
}
// Sets the members needed to display the objective correctly.
objective_offset_ = lp.objective_offset();
objective_scaling_factor_ = lp.objective_scaling_factor();
DCHECK_EQ(num_cols_, lp.num_variables());
if (lp.IsMaximizationProblem()) {
objective_offset_ = -objective_offset_;
objective_scaling_factor_ = -objective_scaling_factor_;
// Note that we use the minimization version of the objective internally.
for (ColIndex col(0); col < lp.num_variables(); ++col) {
const Fractional coeff = -lp.objective_coefficients()[col];
if (objective_[col] != coeff) {
objective_is_unchanged = false;
}
objective_[col] = coeff;
}
objective_offset_ = -lp.objective_offset();
objective_scaling_factor_ = -lp.objective_scaling_factor();
} else {
for (ColIndex col(0); col < lp.num_variables(); ++col) {
if (objective_[col] != lp.objective_coefficients()[col]) {
objective_is_unchanged = false;
break;
}
}
if (!objective_is_unchanged) {
objective_ = lp.objective_coefficients();
}
objective_offset_ = lp.objective_offset();
objective_scaling_factor_ = lp.objective_scaling_factor();
}
return objective_is_unchanged;
}
@@ -1332,23 +1336,22 @@ void RevisedSimplex::ComputeVariableValuesError() {
void RevisedSimplex::ComputeDirection(ColIndex col) {
SCOPED_TIME_STAT(&function_stats_);
DCHECK_COL_BOUNDS(col);
basis_factorization_.RightSolveForProblemColumn(col, &direction_,
&direction_non_zero_);
basis_factorization_.RightSolveForProblemColumn(col, &direction_);
direction_infinity_norm_ = 0.0;
for (const RowIndex row : direction_non_zero_) {
for (const RowIndex row : direction_.non_zeros) {
direction_infinity_norm_ =
std::max(direction_infinity_norm_, std::abs(direction_[row]));
}
IF_STATS_ENABLED(ratio_test_stats_.direction_density.Add(
num_rows_ == 0 ? 0.0
: static_cast<double>(direction_non_zero_.size()) /
: static_cast<double>(direction_.non_zeros.size()) /
static_cast<double>(num_rows_.value())));
}
Fractional RevisedSimplex::ComputeDirectionError(ColIndex col) {
SCOPED_TIME_STAT(&function_stats_);
compact_matrix_.ColumnCopyToDenseColumn(col, &error_);
for (const RowIndex row : direction_non_zero_) {
for (const RowIndex row : direction_.non_zeros) {
compact_matrix_.ColumnAddMultipleToDenseColumn(col, -direction_[row],
&error_);
}
@@ -1404,7 +1407,7 @@ Fractional RevisedSimplex::ComputeHarrisRatioAndLeavingCandidates(
Fractional harris_ratio = bound_flip_ratio;
leaving_candidates->Clear();
const Fractional threshold = parameters_.ratio_test_zero_threshold();
for (const RowIndex row : direction_non_zero_) {
for (const RowIndex row : direction_.non_zeros) {
const Fractional magnitude = std::abs(direction_[row]);
if (magnitude < threshold) continue;
Fractional ratio = GetRatio<is_entering_reduced_cost_positive>(row);
@@ -1731,7 +1734,7 @@ void RevisedSimplex::PrimalPhaseIChooseLeavingVariableRow(
std::vector<BreakPoint> breakpoints;
const Fractional tolerance = parameters_.primal_feasibility_tolerance();
for (RowIndex row : direction_non_zero_) {
for (RowIndex row : direction_.non_zeros) {
const Fractional direction =
reduced_cost > 0.0 ? direction_[row] : -direction_[row];
const Fractional magnitude = std::abs(direction);
@@ -1898,7 +1901,7 @@ void RevisedSimplex::DualPhaseIUpdatePrice(RowIndex leaving_row,
// direction).
const Fractional step =
dual_pricing_vector_[leaving_row] / direction_[leaving_row];
for (const RowIndex row : direction_non_zero_) {
for (const RowIndex row : direction_.non_zeros) {
dual_pricing_vector_[row] -= direction_[row] * step;
is_dual_entering_candidate_.Set(
row,
@@ -1949,21 +1952,21 @@ void RevisedSimplex::DualPhaseIUpdatePriceOnReducedCostChange(
++num_dual_infeasible_positions_;
}
if (!something_to_do) {
initially_all_zero_scratchpad_.resize(num_rows_, 0.0);
initially_all_zero_scratchpad_.values.resize(num_rows_, 0.0);
something_to_do = true;
}
compact_matrix_.ColumnAddMultipleToDenseColumn(
col, sign - dual_infeasibility_improvement_direction_[col],
&initially_all_zero_scratchpad_);
&initially_all_zero_scratchpad_.values);
dual_infeasibility_improvement_direction_[col] = sign;
}
}
if (something_to_do) {
const VariableTypeRow& variable_type = variables_info_.GetTypeRow();
const Fractional threshold = parameters_.ratio_test_zero_threshold();
basis_factorization_.RightSolveWithNonZeros(&initially_all_zero_scratchpad_,
&row_index_vector_scratchpad_);
for (const RowIndex row : row_index_vector_scratchpad_) {
basis_factorization_.RightSolveWithNonZeros(
&initially_all_zero_scratchpad_);
for (const RowIndex row : initially_all_zero_scratchpad_.non_zeros) {
dual_pricing_vector_[row] += initially_all_zero_scratchpad_[row];
initially_all_zero_scratchpad_[row] = 0.0;
is_dual_entering_candidate_.Set(
@@ -1971,6 +1974,7 @@ void RevisedSimplex::DualPhaseIUpdatePriceOnReducedCostChange(
IsDualPhaseILeavingCandidate(dual_pricing_vector_[row],
variable_type[basis_[row]], threshold));
}
initially_all_zero_scratchpad_.non_zeros.clear();
}
}
@@ -2179,8 +2183,8 @@ Status RevisedSimplex::UpdateAndPivot(ColIndex entering_col,
target_bound);
}
UpdateBasis(entering_col, leaving_row, leaving_variable_status);
GLOP_RETURN_IF_ERROR(basis_factorization_.Update(
entering_col, leaving_row, direction_non_zero_, &direction_));
GLOP_RETURN_IF_ERROR(
basis_factorization_.Update(entering_col, leaving_row, direction_));
if (basis_factorization_.IsRefactorized()) {
PermuteBasis();
}
@@ -2277,9 +2281,9 @@ Status RevisedSimplex::Minimize(TimeLimit* time_limit) {
return Status::OK();
}
} else if (feasibility_phase_) {
// Note that direction_non_zero_ contains the positions of the basic
// Note that direction_.non_zeros contains the positions of the basic
// variables whose values were updated during the last iteration.
UpdatePrimalPhaseICosts(direction_non_zero_);
UpdatePrimalPhaseICosts(direction_.non_zeros);
}
Fractional reduced_cost = 0.0;
@@ -2316,13 +2320,10 @@ Status RevisedSimplex::Minimize(TimeLimit* time_limit) {
// Solve the system B.d = a with a the entering column.
ComputeDirection(entering_col);
primal_edge_norms_.TestEnteringEdgeNormPrecision(
entering_col,
ScatteredColumnReference(direction_, direction_non_zero_));
primal_edge_norms_.TestEnteringEdgeNormPrecision(entering_col,
direction_);
if (!reduced_costs_.TestEnteringReducedCostPrecision(
entering_col,
ScatteredColumnReference(direction_, direction_non_zero_),
&reduced_cost)) {
entering_col, direction_, &reduced_cost)) {
VLOG(1) << "Skipping col #" << entering_col << " whose reduced cost is "
<< reduced_cost;
continue;
@@ -2418,13 +2419,10 @@ Status RevisedSimplex::Minimize(TimeLimit* time_limit) {
}
}
variable_values_.UpdateOnPivoting(
ScatteredColumnReference(direction_, direction_non_zero_), entering_col,
step);
variable_values_.UpdateOnPivoting(direction_, entering_col, step);
if (leaving_row != kInvalidRow) {
primal_edge_norms_.UpdateBeforeBasisPivot(
entering_col, basis_[leaving_row], leaving_row,
ScatteredColumnReference(direction_, direction_non_zero_),
entering_col, basis_[leaving_row], leaving_row, direction_,
&update_row_);
reduced_costs_.UpdateBeforeBasisPivot(entering_col, leaving_row,
direction_, &update_row_);
@@ -2589,10 +2587,10 @@ Status RevisedSimplex::DualMinimize(TimeLimit* time_limit) {
/*update_basic_values=*/true);
bound_flip_candidates.clear();
// The direction_non_zero_ contains the positions for which the basic
// The direction_.non_zeros contains the positions for which the basic
// variable value was changed during the previous iterations.
variable_values_.UpdatePrimalInfeasibilityInformation(
direction_non_zero_);
direction_.non_zeros);
}
}
@@ -2649,7 +2647,8 @@ Status RevisedSimplex::DualMinimize(TimeLimit* time_limit) {
problem_status_ = ProblemStatus::ABNORMAL;
} else {
problem_status_ = ProblemStatus::DUAL_UNBOUNDED;
solution_dual_ray_ = update_row_.GetUnitRowLeftInverse().dense_column;
solution_dual_ray_ =
Transpose(update_row_.GetUnitRowLeftInverse().values);
update_row_.RecomputeFullUpdateRow(leaving_row);
solution_dual_ray_row_combination_.assign(num_cols_, 0.0);
for (const ColIndex col : update_row_.GetNonZeroPositions()) {
@@ -2717,16 +2716,13 @@ Status RevisedSimplex::DualMinimize(TimeLimit* time_limit) {
} else {
primal_step =
ComputeStepToMoveBasicVariableToBound(leaving_row, target_bound);
variable_values_.UpdateOnPivoting(
ScatteredColumnReference(direction_, direction_non_zero_),
entering_col, primal_step);
variable_values_.UpdateOnPivoting(direction_, entering_col, primal_step);
}
reduced_costs_.UpdateBeforeBasisPivot(entering_col, leaving_row, direction_,
&update_row_);
dual_edge_norms_.UpdateBeforeBasisPivot(
entering_col, leaving_row,
ScatteredColumnReference(direction_, direction_non_zero_),
entering_col, leaving_row, direction_,
update_row_.GetUnitRowLeftInverse());
// It is important to do the actual pivot after the update above!
@@ -2928,7 +2924,7 @@ ITIVector<RowIndex, SparseRow> RevisedSimplex::ComputeDictionary(
ITIVector<RowIndex, SparseRow> dictionary(num_rows_.value());
for (ColIndex col(0); col < num_cols_; ++col) {
ComputeDirection(col);
for (const RowIndex row : direction_non_zero_) {
for (const RowIndex row : direction_.non_zeros) {
const Fractional scale_coefficient =
scaler == nullptr
? 1.0

View File

@@ -635,11 +635,8 @@ class RevisedSimplex {
DenseColumn dual_pricing_vector_;
DenseBitColumn is_dual_entering_candidate_;
// A temporary dense column that is always reset to all zero after use.
DenseColumn initially_all_zero_scratchpad_;
// A temporary RowIndexVector used to hold the non-zero positions of a column.
RowIndexVector row_index_vector_scratchpad_;
// A temporary scattered column that is always reset to all zero after use.
ScatteredColumn initially_all_zero_scratchpad_;
// Array of column index, giving the column number corresponding
// to a given basis row.
@@ -665,10 +662,9 @@ class RevisedSimplex {
// This is known as 'd' in the literature and is set during each pivot to the
// right inverse of the basic entering column of A by ComputeDirection().
// ComputeDirection() also fills direction_non_zero_ with the position of the
// ComputeDirection() also fills direction_.non_zeros with the position of the
// non-zero.
DenseColumn direction_;
std::vector<RowIndex> direction_non_zero_;
ScatteredColumn direction_;
Fractional direction_infinity_norm_;
// Subpart of direction_ that was ignored during the ratio test. This is only

View File

@@ -38,6 +38,7 @@ using operations_research::glop::Fractional;
using operations_research::glop::GetProblemStatusString;
using operations_research::glop::ProblemStatus;
using operations_research::glop::RowIndex;
using operations_research::glop::ScatteredRow;
using operations_research::glop::VariableStatus;
// Struct storing all the state used by the functions in this file.
@@ -1228,11 +1229,10 @@ SCIP_RETCODE SCIPlpiGetBInvRow(SCIP_LPI* lpi, // LP interface structure
) {
SCOPED_TIME_STAT(lpi->stats);
CHECK_NOTNULL(coef);
DenseRow solution;
ColIndexVector non_zero_positions;
ScatteredRow solution;
lpi->solver->GetBasisFactorization()
.LeftSolveForUnitRow(ColIndex(r), &solution, &non_zero_positions);
const ColIndex num_cols = solution.size();
.LeftSolveForUnitRow(ColIndex(r), &solution);
const ColIndex num_cols = solution.values.size();
for (ColIndex col(0); col < num_cols; ++col) {
coef[col.value()] = solution[col];
}

View File

@@ -52,24 +52,23 @@ void UpdateRow::IgnoreUpdatePosition(ColIndex col) {
coefficient_[col] = 0.0;
}
ScatteredColumnReference UpdateRow::GetUnitRowLeftInverse() const {
const ScatteredRow& UpdateRow::GetUnitRowLeftInverse() const {
DCHECK(!compute_update_row_);
return ScatteredColumnReference(unit_row_left_inverse_,
unit_row_left_inverse_non_zeros_);
return unit_row_left_inverse_;
}
void UpdateRow::ComputeUnitRowLeftInverse(RowIndex leaving_row) {
SCOPED_TIME_STAT(&stats_);
basis_factorization_.LeftSolveForUnitRow(RowToColIndex(leaving_row),
&unit_row_left_inverse_,
&unit_row_left_inverse_non_zeros_);
&unit_row_left_inverse_);
// TODO(user): Refactorize if the estimated accuracy is above a threshold.
IF_STATS_ENABLED(stats_.unit_row_left_inverse_accuracy.Add(
matrix_.ColumnScalarProduct(basis_[leaving_row], unit_row_left_inverse_) -
matrix_.ColumnScalarProduct(basis_[leaving_row],
unit_row_left_inverse_.values) -
1.0));
IF_STATS_ENABLED(stats_.unit_row_left_inverse_density.Add(
static_cast<double>(unit_row_left_inverse_non_zeros_.size()) /
static_cast<double>(unit_row_left_inverse_.non_zeros.size()) /
static_cast<double>(matrix_.num_rows().value())));
}
@@ -82,7 +81,7 @@ void UpdateRow::ComputeUpdateRow(RowIndex leaving_row) {
if (parameters_.use_transposed_matrix()) {
// Number of entries that ComputeUpdatesRowWise() will need to look at.
EntryIndex num_row_wise_entries(0);
for (const ColIndex col : unit_row_left_inverse_non_zeros_) {
for (const ColIndex col : unit_row_left_inverse_.non_zeros) {
num_row_wise_entries += transposed_matrix_.ColumnNumEntries(col);
}
@@ -121,8 +120,8 @@ void UpdateRow::ComputeUpdateRow(RowIndex leaving_row) {
void UpdateRow::ComputeUpdateRowForBenchmark(const DenseRow& lhs,
const std::string& algorithm) {
unit_row_left_inverse_ = lhs;
ComputeNonZeros(lhs, &unit_row_left_inverse_non_zeros_);
unit_row_left_inverse_.values = lhs;
ComputeNonZeros(lhs, &unit_row_left_inverse_.non_zeros);
if (algorithm == "column") {
ComputeUpdatesColumnWise();
} else if (algorithm == "row") {
@@ -151,7 +150,7 @@ void UpdateRow::ComputeUpdatesRowWise() {
SCOPED_TIME_STAT(&stats_);
const ColIndex num_cols = matrix_.num_cols();
coefficient_.AssignToZero(num_cols);
for (ColIndex col : unit_row_left_inverse_non_zeros_) {
for (ColIndex col : unit_row_left_inverse_.non_zeros) {
const Fractional multiplier = unit_row_left_inverse_[col];
for (const EntryIndex i : transposed_matrix_.Column(col)) {
const ColIndex pos = RowToColIndex(transposed_matrix_.EntryRow(i));
@@ -175,7 +174,7 @@ void UpdateRow::ComputeUpdatesRowWiseHypersparse() {
const ColIndex num_cols = matrix_.num_cols();
non_zero_position_set_.ClearAndResize(num_cols);
coefficient_.resize(num_cols, 0.0);
for (ColIndex col : unit_row_left_inverse_non_zeros_) {
for (ColIndex col : unit_row_left_inverse_.non_zeros) {
const Fractional multiplier = unit_row_left_inverse_[col];
for (const EntryIndex i : transposed_matrix_.Column(col)) {
const ColIndex pos = RowToColIndex(transposed_matrix_.EntryRow(i));
@@ -225,7 +224,7 @@ void UpdateRow::RecomputeFullUpdateRow(RowIndex leaving_row) {
// Fills the non-basic column.
for (const ColIndex col : variables_info_.GetNotBasicBitRow()) {
const Fractional coeff =
matrix_.ColumnScalarProduct(col, unit_row_left_inverse_);
matrix_.ColumnScalarProduct(col, unit_row_left_inverse_.values);
if (std::abs(coeff) > drop_tolerance) {
non_zero_position_list_.push_back(col);
coefficient_[col] = coeff;
@@ -247,7 +246,7 @@ void UpdateRow::ComputeUpdatesColumnWise() {
for (const ColIndex col : variables_info_.GetIsRelevantBitRow()) {
// Coefficient of the column right inverse on the 'leaving_row'.
const Fractional coeff =
matrix_.ColumnScalarProduct(col, unit_row_left_inverse_);
matrix_.ColumnScalarProduct(col, unit_row_left_inverse_.values);
// Nothing to do if 'coeff' is (almost) zero which does happen due to
// sparsity. Note that it shouldn't be too bad to use a non-zero drop
// tolerance here because even if we introduce some precision issues, the
@@ -268,7 +267,7 @@ void UpdateRow::ComputeUpdatesColumnWise() {
const ColIndex col(i);
if (is_relevant.IsSet(col)) {
coefficient_[col] =
matrix_.ColumnScalarProduct(col, unit_row_left_inverse_);
matrix_.ColumnScalarProduct(col, unit_row_left_inverse_.values);
}
}
// End of omp parallel for.

View File

@@ -56,7 +56,7 @@ class UpdateRow {
// Returns the left inverse of the unit row as computed by the last call to
// ComputeUpdateRow(). In debug mode, we check that ComputeUpdateRow() was
// called since the last Invalidate().
ScatteredColumnReference GetUnitRowLeftInverse() const;
const ScatteredRow& GetUnitRowLeftInverse() const;
// Returns the update coefficients and non-zero positions corresponding to the
// last call to ComputeUpdateRow().
@@ -94,7 +94,7 @@ class UpdateRow {
private:
// Computes the left inverse of the given unit row, and stores it in
// unit_row_left_inverse_ and unit_row_left_inverse_non_zeros_.
// unit_row_left_inverse_.
void ComputeUnitRowLeftInverse(RowIndex leaving_row);
// ComputeUpdateRow() does the common work and calls one of these functions
@@ -112,8 +112,7 @@ class UpdateRow {
// Left inverse by B of a unit row. Its scalar product with a column 'a' of A
// gives the value of the right inverse of 'a' on the 'leaving_row'.
DenseRow unit_row_left_inverse_;
ColIndexVector unit_row_left_inverse_non_zeros_;
ScatteredRow unit_row_left_inverse_;
// Holds the current update row data.
// TODO(user): Introduce a ScatteredSparseRow class?

View File

@@ -117,7 +117,7 @@ Fractional VariableValues::ComputeSumOfPrimalInfeasibilities() const {
return sum;
}
void VariableValues::UpdateOnPivoting(ScatteredColumnReference direction,
void VariableValues::UpdateOnPivoting(const ScatteredColumn& direction,
ColIndex entering_col, Fractional step) {
SCOPED_TIME_STAT(&stats_);
DCHECK(IsFinite(step));
@@ -133,7 +133,7 @@ void VariableValues::UpdateOnPivoting(ScatteredColumnReference direction,
// Note that there is no need to call variables_info_.Update() on basic
// variables when they change values. Note also that the status of
// entering_col will be updated later.
for (const RowIndex row : direction.non_zero_rows) {
for (const RowIndex row : direction.non_zeros) {
const ColIndex col = basis_[row];
variable_values_[col] -= direction[row] * step;
}
@@ -144,22 +144,24 @@ void VariableValues::UpdateGivenNonBasicVariables(
const std::vector<ColIndex>& cols_to_update, bool update_basic_variables) {
SCOPED_TIME_STAT(&stats_);
if (update_basic_variables) {
initially_all_zero_scratchpad_.resize(matrix_.num_rows(), 0.0);
DCHECK(IsAllZero(initially_all_zero_scratchpad_));
initially_all_zero_scratchpad_.values.resize(matrix_.num_rows(), 0.0);
DCHECK(IsAllZero(initially_all_zero_scratchpad_.values));
for (ColIndex col : cols_to_update) {
const Fractional old_value = variable_values_[col];
SetNonBasicVariableValueFromStatus(col);
matrix_.ColumnAddMultipleToDenseColumn(col,
variable_values_[col] - old_value,
&initially_all_zero_scratchpad_);
matrix_.ColumnAddMultipleToDenseColumn(
col, variable_values_[col] - old_value,
&initially_all_zero_scratchpad_.values);
}
basis_factorization_.RightSolveWithNonZeros(&initially_all_zero_scratchpad_,
&row_index_vector_scratchpad_);
for (const RowIndex row : row_index_vector_scratchpad_) {
basis_factorization_.RightSolveWithNonZeros(
&initially_all_zero_scratchpad_);
for (const RowIndex row : initially_all_zero_scratchpad_.non_zeros) {
variable_values_[basis_[row]] -= initially_all_zero_scratchpad_[row];
initially_all_zero_scratchpad_[row] = 0.0;
}
UpdatePrimalInfeasibilityInformation(row_index_vector_scratchpad_);
UpdatePrimalInfeasibilityInformation(
initially_all_zero_scratchpad_.non_zeros);
initially_all_zero_scratchpad_.non_zeros.clear();
} else {
for (ColIndex col : cols_to_update) {
SetNonBasicVariableValueFromStatus(col);

View File

@@ -74,8 +74,8 @@ class VariableValues {
// Updates the variable during a simplex pivot:
// - step * direction is subtracted from the basic variables value.
// - step is added to the entering column value.
void UpdateOnPivoting(ScatteredColumnReference direction,
ColIndex entering_col, Fractional step);
void UpdateOnPivoting(const ScatteredColumn& direction, ColIndex entering_col,
Fractional step);
// Batch version of SetNonBasicVariableValueFromStatus(). This function also
// updates the basic variable values and infeasibility statuses if
@@ -125,11 +125,8 @@ class VariableValues {
mutable StatsGroup stats_;
mutable DenseColumn scratchpad_;
// A temporary dense column that is always reset to all zero after use.
DenseColumn initially_all_zero_scratchpad_;
// A temporary RowIndexVector used to hold the non-zero positions of a column.
RowIndexVector row_index_vector_scratchpad_;
// A temporary scattered column that is always reset to all zero after use.
ScatteredColumn initially_all_zero_scratchpad_;
DISALLOW_COPY_AND_ASSIGN(VariableValues);
};