diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..96ef6c0
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,2 @@
+/target
+Cargo.lock
diff --git a/Cargo.toml b/Cargo.toml
new file mode 100644
index 0000000..936bba7
--- /dev/null
+++ b/Cargo.toml
@@ -0,0 +1,20 @@
+[package]
+name = "cp_sat"
+version = "0.1.0"
+edition = "2018"
+description = "Rust bindings to the Google CP-SAT constraint programming solver."
+documentation = "https://docs.rs/cp_sat"
+repository = "https://github.com/KardinalAI/cp_sat"
+license = "Apache-2.0"
+keywords = ["constraint", "programming", "CP"]
+categories = ["api-bindings", "mathematics", "science"]
+readme = "README.md"
+
+[dependencies]
+prost = "0.8"
+bytes = "1.1.0"
+libc = "0.2.101"
+
+[build-dependencies]
+prost-build = { version = "0.8" }
+cc = "1.0"
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..17895dc
--- /dev/null
+++ b/README.md
@@ -0,0 +1,10 @@
+# Google CP-SAT solver Rust bindings
+
+Rust bindings to the Google CP-SAT constraint programming solver.
+
+To use this library, you need a C++ compiler and an installation of
+the Google or-tools library files.
+
+The environment variable `ORTOOLS_PREFIX` is used to find include
+files and library files. If not set, `/opt/ortools` will be added
+to the search path (the standard search paths will also be used).
diff --git a/build.rs b/build.rs
new file mode 100644
index 0000000..1b6dd98
--- /dev/null
+++ b/build.rs
@@ -0,0 +1,26 @@
+// Build script: generates Rust types from the CP-SAT protos and compiles the
+// C++ shim that calls into the installed or-tools library.
+fn main() {
+    // Once any rerun-if directive is emitted (prost_build emits them for the
+    // proto files), cargo only reruns on the listed inputs — so declare the
+    // C++ shim and the prefix env var explicitly.
+    println!("cargo:rerun-if-changed=src/cp_sat_wrapper.cpp");
+    println!("cargo:rerun-if-env-changed=ORTOOLS_PREFIX");
+
+    prost_build::compile_protos(
+        &["src/cp_model.proto", "src/sat_parameters.proto"],
+        &["src/"],
+    )
+    .unwrap();
+
+    // Where the or-tools headers/libraries are installed (see README).
+    let ortools_prefix =
+        std::env::var("ORTOOLS_PREFIX").unwrap_or_else(|_| "/opt/ortools".into());
+    cc::Build::new()
+        .cpp(true)
+        .flag("-std=c++17")
+        .file("src/cp_sat_wrapper.cpp")
+        .include(format!("{}/include", ortools_prefix))
+        .compile("cp_sat_wrapper.a");
+
+    println!("cargo:rustc-link-lib=dylib=ortools");
+    println!("cargo:rustc-link-search=native={}/lib", ortools_prefix);
+}
diff --git a/examples/simple_sat_program.rs b/examples/simple_sat_program.rs
new file mode 100644
index 0000000..5c74c62
--- /dev/null
+++ b/examples/simple_sat_program.rs
@@ -0,0 +1,40 @@
+use cp_sat::proto::{
+    constraint_proto::Constraint, AllDifferentConstraintProto, ConstraintProto, CpModelProto,
+    IntegerVariableProto,
+};
+
+fn main() {
+    let model = CpModelProto {
+        variables: vec![
+            IntegerVariableProto {
+                name: "x".into(),
+                domain: vec![0, 2],
+            },
+            IntegerVariableProto {
+                name: "y".into(),
+                domain: vec![0, 2],
+            },
+            IntegerVariableProto {
+                name: "z".into(),
+                domain: vec![0, 2],
+            },
+        ],
+        constraints: vec![ConstraintProto {
+            constraint: Some(Constraint::AllDiff(AllDifferentConstraintProto {
+                vars: vec![0, 1, 2],
+            })),
+            ..Default::default()
+        }],
+        ..Default::default()
+    };
+    println!("{:#?}", model);
+
+    println!("model stats: {}", cp_sat::cp_model_stats(&model));
+
+    let response = cp_sat::solve(&model);
+    println!("{:#?}", response);
+    println!(
+        "response stats: {}",
+        cp_sat::cp_solver_response_stats(&response, false)
+    );
+}
diff --git a/src/cp_model.proto b/src/cp_model.proto
new file mode 100644
index 0000000..3852e06
--- /dev/null
+++ b/src/cp_model.proto
@@ -0,0 +1,754 @@
+// Copyright 2010-2021 Google LLC
+// Licensed under the Apache License, Version 2.0 (the
"License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Proto describing a general Constraint Programming (CP) problem. + +syntax = "proto3"; + +package operations_research.sat; + +option csharp_namespace = "Google.OrTools.Sat"; +option java_package = "com.google.ortools.sat"; +option java_multiple_files = true; +option java_outer_classname = "CpModelProtobuf"; + +// An integer variable. +// +// It will be referred to by an int32 corresponding to its index in a +// CpModelProto variables field. +// +// Depending on the context, a reference to a variable whose domain is in [0, 1] +// can also be seen as a Boolean that will be true if the variable value is 1 +// and false if it is 0. When used in this context, the field name will always +// contain the word "literal". +// +// Negative reference (advanced usage): to simplify the creation of a model and +// for efficiency reasons, all the "literal" or "variable" fields can also +// contain a negative index. A negative index i will refer to the negation of +// the integer variable at index -i -1 or to NOT the literal at the same index. +// +// Ex: A variable index 4 will refer to the integer variable model.variables(4) +// and an index of -5 will refer to the negation of the same variable. A literal +// index 4 will refer to the logical fact that model.variable(4) == 1 and a +// literal index of -5 will refer to the logical fact model.variable(4) == 0. +message IntegerVariableProto { + // For debug/logging only. Can be empty. 
+ string name = 1; + + // The variable domain given as a sorted list of n disjoint intervals + // [min, max] and encoded as [min_0, max_0, ..., min_{n-1}, max_{n-1}]. + // + // The most common example being just [min, max]. + // If min == max, then this is a constant variable. + // + // We have: + // - domain_size() is always even. + // - min == domain.front(); + // - max == domain.back(); + // - for all i < n : min_i <= max_i + // - for all i < n-1 : max_i + 1 < min_{i+1}. + // + // Note that we check at validation that a variable domain is small enough so + // that we don't run into integer overflow in our algorithms. Because of that, + // you cannot just have "unbounded" variable like [0, kint64max] and should + // try to specify tighter domains. + repeated int64 domain = 2; +} + +// Argument of the constraints of the form OP(literals). +message BoolArgumentProto { + repeated int32 literals = 1; +} + +// Argument of the constraints of the form target_var = OP(vars). +message IntegerArgumentProto { + int32 target = 1; + repeated int32 vars = 2; +} + +// Some constraints supports linear expression instead of just using a reference +// to a variable. This is especially useful during presolve to reduce the model +// size. +message LinearExpressionProto { + repeated int32 vars = 1; + repeated int64 coeffs = 2; + int64 offset = 3; +} + +message LinearArgumentProto { + LinearExpressionProto target = 1; + repeated LinearExpressionProto exprs = 2; +} + +// All variables must take different values. +message AllDifferentConstraintProto { + repeated int32 vars = 1; +} + +// The linear sum vars[i] * coeffs[i] must fall in the given domain. The domain +// has the same format as the one in IntegerVariableProto. +// +// Note that the validation code currently checks using the domain of the +// involved variables that the sum can always be computed without integer +// overflow and throws an error otherwise. 
+message LinearConstraintProto { + repeated int32 vars = 1; + repeated int64 coeffs = 2; // Same size as vars. + repeated int64 domain = 3; +} + +// The constraint target = vars[index]. +// This enforces that index takes one of the value in [0, vars_size()). +message ElementConstraintProto { + int32 index = 1; + int32 target = 2; + repeated int32 vars = 3; +} + +// This "special" constraint not only enforces (start + size == end) and (size +// >= 0) but can also be referred by other constraints using this "interval" +// concept. +message IntervalConstraintProto { + int32 start = 1; + int32 end = 2; + int32 size = 3; + + // EXPERIMENTAL: This will become the new way to specify an interval. + // Depending on the parameters, the presolve will convert the old way to the + // new way. Do not forget to add an associated linear constraint if you use + // this directly. + // + // If any of this field is set, then all must be set and the ones above will + // be ignored. + // + // IMPORTANT: For now, this constraint do not enforce any relations on the + // view, and a linear constraint must be added together with this to enforce + // enforcement => start_view + size_view == end_view. An enforcement => + // size_view >=0 might also be needed. + // + // IMPORTANT: For now, we just support affine relation. We could easily + // create an intermediate variable to support full linear expression, but this + // isn't done currently. + LinearExpressionProto start_view = 4; + LinearExpressionProto end_view = 5; + LinearExpressionProto size_view = 6; +} + +// All the intervals (index of IntervalConstraintProto) must be disjoint. More +// formally, there must exist a sequence so that for each consecutive intervals, +// we have end_i <= start_{i+1}. In particular, intervals of size zero do matter +// for this constraint. This is also known as a disjunctive constraint in +// scheduling. 
+message NoOverlapConstraintProto { + repeated int32 intervals = 1; +} + +// The boxes defined by [start_x, end_x) * [start_y, end_y) cannot overlap. +message NoOverlap2DConstraintProto { + repeated int32 x_intervals = 1; + repeated int32 y_intervals = 2; // Same size as x_intervals. + bool boxes_with_null_area_can_overlap = 3; +} + +// The sum of the demands of the intervals at each interval point cannot exceed +// a capacity. Note that intervals are interpreted as [start, end) and as +// such intervals like [2,3) and [3,4) do not overlap for the point of view of +// this constraint. Moreover, intervals of size zero are ignored. +message CumulativeConstraintProto { + int32 capacity = 1; + repeated int32 intervals = 2; + repeated int32 demands = 3; // Same size as intervals. +} + +// Maintain a reservoir level within bounds. The water level starts at 0, and at +// any time, it must be within [min_level, max_level]. +// +// If the variable actives[i] is true, and if the variable times[i] is assigned +// a value t, then the current level changes by demands[i] (which is constant) +// at the time t. Therefore, at any time t: +// sum(demands[i] * actives[i] if times[i] <= t) in [min_level, max_level] +// +// Note that min level must be <= 0, and the max level must be >= 0. Please use +// fixed demands to simulate initial state. +// +// The array of boolean variables 'actives', if defined, indicates which actions +// are actually performed. If this array is not defined, then it is assumed that +// all actions will be performed. +message ReservoirConstraintProto { + int64 min_level = 1; + int64 max_level = 2; + repeated int32 times = 3; // variables. + repeated int64 demands = 4; // constants, can be negative. + repeated int32 actives = 5; // literals. +} + +// The circuit constraint is defined on a graph where the arc presence are +// controlled by literals. Each arc is given by an index in the +// tails/heads/literals lists that must have the same size. 
+// +// For now, we ignore node indices with no incident arc. All the other nodes +// must have exactly one incoming and one outgoing selected arc (i.e. literal at +// true). All the selected arcs that are not self-loops must form a single +// circuit. Note that multi-arcs are allowed, but only one of them will be true +// at the same time. Multi-self loop are disallowed though. +message CircuitConstraintProto { + repeated int32 tails = 3; + repeated int32 heads = 4; + repeated int32 literals = 5; +} + +// The "VRP" (Vehicle Routing Problem) constraint. +// +// The direct graph where arc #i (from tails[i] to head[i]) is present iff +// literals[i] is true must satisfy this set of properties: +// - #incoming arcs == 1 except for node 0. +// - #outgoing arcs == 1 except for node 0. +// - for node zero, #incoming arcs == #outgoing arcs. +// - There are no duplicate arcs. +// - Self-arcs are allowed except for node 0. +// - There is no cycle in this graph, except through node 0. +// +// TODO(user): It is probably possible to generalize this constraint to a +// no-cycle in a general graph, or a no-cycle with sum incoming <= 1 and sum +// outgoing <= 1 (more efficient implementation). On the other hand, having this +// specific constraint allow us to add specific "cuts" to a VRP problem. +message RoutesConstraintProto { + repeated int32 tails = 1; + repeated int32 heads = 2; + repeated int32 literals = 3; + + // Experimental. The demands for each node, and the maximum capacity for each + // route. Note that this is currently only used for the LP relaxation and one + // need to add the corresponding constraint to enforce this outside of the LP. + // + // TODO(user): Ideally, we should be able to extract any dimension like these + // (i.e. capacity, route_length, etc..) automatically from the encoding. 
The + // classical way to encode that is to have "current_capacity" variables along + // the route and linear equations of the form: + // arc_literal => (current_capacity_tail + demand <= current_capacity_head) + repeated int32 demands = 4; + int64 capacity = 5; +} + +// The values of the n-tuple formed by the given variables can only be one of +// the listed n-tuples in values. The n-tuples are encoded in a flattened way: +// [tuple0_v0, tuple0_v1, ..., tuple0_v{n-1}, tuple1_v0, ...]. +message TableConstraintProto { + repeated int32 vars = 1; + repeated int64 values = 2; + + // If true, the meaning is "negated", that is we forbid any of the given + // tuple from a feasible assignment. + bool negated = 3; +} + +// The two arrays of variable each represent a function, the second is the +// inverse of the first: f_direct[i] == j <=> f_inverse[j] == i. +message InverseConstraintProto { + repeated int32 f_direct = 1; + repeated int32 f_inverse = 2; +} + +// This constraint forces a sequence of variables to be accepted by an +// automaton. +message AutomatonConstraintProto { + // A state is identified by a non-negative number. It is preferable to keep + // all the states dense in says [0, num_states). The automaton starts at + // starting_state and must finish in any of the final states. + int64 starting_state = 2; + repeated int64 final_states = 3; + + // List of transitions (all 3 vectors have the same size). Both tail and head + // are states, label is any variable value. No two outgoing transitions from + // the same state can have the same label. + repeated int64 transition_tail = 4; + repeated int64 transition_head = 5; + repeated int64 transition_label = 6; + + // The sequence of variables. The automaton is ran for vars_size() "steps" and + // the value of vars[i] corresponds to the transition label at step i. + repeated int32 vars = 7; +} + +// Next id: 30 +message ConstraintProto { + // For debug/logging only. Can be empty. 
+ string name = 1; + + // The constraint will be enforced iff all literals listed here are true. If + // this is empty, then the constraint will always be enforced. An enforced + // constraint must be satisfied, and an un-enforced one will simply be + // ignored. + // + // This is also called half-reification. To have an equivalence between a + // literal and a constraint (full reification), one must add both a constraint + // (controlled by a literal l) and its negation (controlled by the negation of + // l). + // + // Important: as of September 2018, only a few constraint support enforcement: + // - bool_or, bool_and, linear: fully supported. + // - interval: only support a single enforcement literal. + // - other: no support (but can be added on a per-demand basis). + repeated int32 enforcement_literal = 2; + + // The actual constraint with its arguments. + oneof constraint { + // The bool_or constraint forces at least one literal to be true. + BoolArgumentProto bool_or = 3; + + // The bool_and constraint forces all of the literals to be true. + // + // This is a "redundant" constraint in the sense that this can easily be + // encoded with many bool_or or at_most_one. It is just more space efficient + // and handled slightly differently internally. + BoolArgumentProto bool_and = 4; + + // The at_most_one constraint enforces that no more than one literal is + // true at the same time. + // + // Note that an at most one constraint of length n could be encoded with n + // bool_and constraint with n-1 term on the right hand side. So in a sense, + // this constraint contribute directly to the "implication-graph" or the + // 2-SAT part of the model. + // + // This constraint does not support enforcement_literal. Just use a linear + // constraint if you need to enforce it. You also do not need to use it + // directly, we will extract it from the model in most situations. 
+ BoolArgumentProto at_most_one = 26; + + // The exactly_one constraint force exactly one literal to true and no more. + // + // Anytime a bool_or (it could have been called at_least_one) is included + // into an at_most_one, then the bool_or is actually an exactly one + // constraint, and the extra literal in the at_most_one can be set to false. + // So in this sense, this constraint is not really needed. it is just here + // for a better description of the problem structure and to facilitate some + // algorithm. + // + // This constraint does not support enforcement_literal. Just use a linear + // constraint if you need to enforce it. You also do not need to use it + // directly, we will extract it from the model in most situations. + BoolArgumentProto exactly_one = 29; + + // The bool_xor constraint forces an odd number of the literals to be true. + BoolArgumentProto bool_xor = 5; + + // The int_div constraint forces the target to equal vars[0] / vars[1]. + // In particular, vars[1] can never take the value 0. + IntegerArgumentProto int_div = 7; + + // The int_mod constraint forces the target to equal vars[0] % vars[1]. + // The domain of vars[1] must be strictly positive. + IntegerArgumentProto int_mod = 8; + + // The int_max constraint forces the target to equal the maximum of all + // variables. + // + // The lin_max constraint forces the target to equal the maximum of all + // linear expressions. + // + // TODO(user): Remove int_max in favor of lin_max. + IntegerArgumentProto int_max = 9; + LinearArgumentProto lin_max = 27; + + // The int_min constraint forces the target to equal the minimum of all + // variables. + // + // The lin_min constraint forces the target to equal the minimum of all + // linear expressions. + // + // TODO(user): Remove int_min in favor of lin_min. + IntegerArgumentProto int_min = 10; + LinearArgumentProto lin_min = 28; + + // The int_prod constraint forces the target to equal the product of all + // variables. 
By convention, because we can just remove term equal to one, + // the empty product forces the target to be one. + // + // TODO(user): Support more than two terms in the product. + IntegerArgumentProto int_prod = 11; + + // The linear constraint enforces a linear inequality among the variables, + // such as 0 <= x + 2y <= 10. + LinearConstraintProto linear = 12; + + // The all_diff constraint forces all variables to take different values. + AllDifferentConstraintProto all_diff = 13; + + // The element constraint forces the variable with the given index + // to be equal to the target. + ElementConstraintProto element = 14; + + // The circuit constraint takes a graph and forces the arcs present + // (with arc presence indicated by a literal) to form a unique cycle. + CircuitConstraintProto circuit = 15; + + // The routes constraint implements the vehicle routing problem. + RoutesConstraintProto routes = 23; + + // The table constraint enforces what values a tuple of variables may + // take. + TableConstraintProto table = 16; + + // The automaton constraint forces a sequence of variables to be accepted + // by an automaton. + AutomatonConstraintProto automaton = 17; + + // The inverse constraint forces two arrays to be inverses of each other: + // the values of one are the indices of the other, and vice versa. + InverseConstraintProto inverse = 18; + + // The reservoir constraint forces the sum of a set of active demands + // to always be between a specified minimum and maximum value during + // specific times. + ReservoirConstraintProto reservoir = 24; + + // Constraints on intervals. + // + // The first constraint defines what an "interval" is and the other + // constraints use references to it. All the intervals that have an + // enforcement_literal set to false are ignored by these constraints. + // + // TODO(user): Explain what happen for intervals of size zero. Some + // constraints ignore them; others do take them into account. 
+ + // The interval constraint takes a start, end, and size, and forces + // start + size == end. + IntervalConstraintProto interval = 19; + + // The no_overlap constraint prevents a set of intervals from + // overlapping; in scheduling, this is called a disjunctive + // constraint. + NoOverlapConstraintProto no_overlap = 20; + + // The no_overlap_2d constraint prevents a set of boxes from overlapping. + NoOverlap2DConstraintProto no_overlap_2d = 21; + + // The cumulative constraint ensures that for any integer point, the sum + // of the demands of the intervals containing that point does not exceed + // the capacity. + CumulativeConstraintProto cumulative = 22; + } +} + +// Optimization objective. +// +// This is in a message because decision problems don't have any objective. +message CpObjectiveProto { + // The linear terms of the objective to minimize. + // For a maximization problem, one can negate all coefficients in the + // objective and set a scaling_factor to -1. + repeated int32 vars = 1; + repeated int64 coeffs = 4; + + // The displayed objective is always: + // scaling_factor * (sum(coefficients[i] * objective_vars[i]) + offset). + // This is needed to have a consistent objective after presolve or when + // scaling a double problem to express it with integers. + // + // Note that if scaling_factor is zero, then it is assumed to be 1, so that by + // default these fields have no effect. + double offset = 2; + double scaling_factor = 3; + + // If non-empty, only look for an objective value in the given domain. + // Note that this does not depend on the offset or scaling factor, it is a + // domain on the sum of the objective terms only. + repeated int64 domain = 5; +} + +// Define the strategy to follow when the solver needs to take a new decision. +// Note that this strategy is only defined on a subset of variables. +message DecisionStrategyProto { + // The variables to be considered for the next decision. 
The order matter and + // is always used as a tie-breaker after the variable selection strategy + // criteria defined below. + repeated int32 variables = 1; + + // The order in which the variables above should be considered. Note that only + // variables that are not already fixed are considered. + // + // TODO(user): extend as needed. + enum VariableSelectionStrategy { + CHOOSE_FIRST = 0; + CHOOSE_LOWEST_MIN = 1; + CHOOSE_HIGHEST_MAX = 2; + CHOOSE_MIN_DOMAIN_SIZE = 3; + CHOOSE_MAX_DOMAIN_SIZE = 4; + } + VariableSelectionStrategy variable_selection_strategy = 2; + + // Once a variable has been chosen, this enum describe what decision is taken + // on its domain. + // + // TODO(user): extend as needed. + enum DomainReductionStrategy { + SELECT_MIN_VALUE = 0; + SELECT_MAX_VALUE = 1; + SELECT_LOWER_HALF = 2; + SELECT_UPPER_HALF = 3; + SELECT_MEDIAN_VALUE = 4; + } + DomainReductionStrategy domain_reduction_strategy = 3; + + // Advanced usage. Some of the variable listed above may have been transformed + // by the presolve so this is needed to properly follow the given selection + // strategy. Instead of using a value X for variables[index], we will use + // positive_coeff * X + offset instead. + message AffineTransformation { + int32 index = 1; + int64 offset = 2; + int64 positive_coeff = 3; + } + repeated AffineTransformation transformations = 4; +} + +// This message encodes a partial (or full) assignment of the variables of a +// CpModelProto. The variable indices should be unique and valid variable +// indices. +message PartialVariableAssignment { + repeated int32 vars = 1; + repeated int64 values = 2; +} + +// A permutation of integers encoded as a list of cycles, hence the "sparse" +// format. The image of an element cycle[i] is cycle[(i + 1) % cycle_length]. +message SparsePermutationProto { + // Each cycle is listed one after the other in the support field. + // The size of each cycle is given (in order) in the cycle_sizes field. 
+ repeated int32 support = 1; + repeated int32 cycle_sizes = 2; +} + +// A dense matrix of numbers encoded in a flat way, row by row. +// That is matrix[i][j] = entries[i * num_cols + j]; +message DenseMatrixProto { + int32 num_rows = 1; + int32 num_cols = 2; + repeated int32 entries = 3; +} + +// Experimental. For now, this is meant to be used by the solver and not filled +// by clients. +// +// Hold symmetry information about the set of feasible solutions. If we permute +// the variable values of any feasible solution using one of the permutation +// described here, we should always get another feasible solution. +// +// We usually also enforce that the objective of the new solution is the same. +// +// The group of permutations encoded here is usually computed from the encoding +// of the model, so it is not meant to be a complete representation of the +// feasible solution symmetries, just a valid subgroup. +message SymmetryProto { + // A list of variable indices permutations that leave the feasible space of + // solution invariant. Usually, we only encode a set of generators of the + // group. + repeated SparsePermutationProto permutations = 1; + + // An orbitope is a special symmetry structure of the solution space. If the + // variable indices are arranged in a matrix (with no duplicates), then any + // permutation of the columns will be a valid permutation of the feasible + // space. + // + // This arise quite often. The typical example is a graph coloring problem + // where for each node i, you have j booleans to indicate its color. If the + // variables color_of_i_is_j are arranged in a matrix[i][j], then any columns + // permutations leave the problem invariant. + repeated DenseMatrixProto orbitopes = 2; +} + +// A constraint programming problem. +message CpModelProto { + // For debug/logging only. Can be empty. + string name = 1; + + // The associated Protos should be referred by their index in these fields. 
+ repeated IntegerVariableProto variables = 2; + repeated ConstraintProto constraints = 3; + + // The objective to minimize. Can be empty for pure decision problems. + CpObjectiveProto objective = 4; + + // Defines the strategy that the solver should follow when the + // search_branching parameter is set to FIXED_SEARCH. Note that this strategy + // is also used as a heuristic when we are not in fixed search. + // + // Advanced Usage: if not all variables appears and the parameter + // "instantiate_all_variables" is set to false, then the solver will not try + // to instantiate the variables that do not appear. Thus, at the end of the + // search, not all variables may be fixed and this is why we have the + // solution_lower_bounds and solution_upper_bounds fields in the + // CpSolverResponse. + repeated DecisionStrategyProto search_strategy = 5; + + // Solution hint. + // + // If a feasible or almost-feasible solution to the problem is already known, + // it may be helpful to pass it to the solver so that it can be used. The + // solver will try to use this information to create its initial feasible + // solution. + // + // Note that it may not always be faster to give a hint like this to the + // solver. There is also no guarantee that the solver will use this hint or + // try to return a solution "close" to this assignment in case of multiple + // optimal solutions. + PartialVariableAssignment solution_hint = 6; + + // A list of literals. The model will be solved assuming all these literals + // are true. Compared to just fixing the domain of these literals, using this + // mechanism is slower but allows in case the model is INFEASIBLE to get a + // potentially small subset of them that can be used to explain the + // infeasibility. + // + // Think (IIS), except when you are only concerned by the provided + // assumptions. 
This is powerful as it allows to group a set of logicially + // related constraint under only one enforcement literal which can potentially + // give you a good and interpretable explanation for infeasiblity. + // + // Such infeasibility explanation will be available in the + // sufficient_assumptions_for_infeasibility response field. + repeated int32 assumptions = 7; + + // For now, this is not meant to be filled by a client writing a model, but + // by our preprocessing step. + // + // Information about the symmetries of the feasible solution space. + // These usually leaves the objective invariant. + SymmetryProto symmetry = 8; +} + +// The status returned by a solver trying to solve a CpModelProto. +enum CpSolverStatus { + // The status of the model is still unknown. A search limit has been reached + // before any of the statuses below could be determined. + UNKNOWN = 0; + + // The given CpModelProto didn't pass the validation step. You can get a + // detailed error by calling ValidateCpModel(model_proto). + MODEL_INVALID = 1; + + // A feasible solution has been found. But the search was stopped before we + // could prove optimality or before we enumerated all solutions of a + // feasibility problem (if asked). + FEASIBLE = 2; + + // The problem has been proven infeasible. + INFEASIBLE = 3; + + // An optimal feasible solution has been found. + // + // More generally, this status represent a success. So we also return OPTIMAL + // if we find a solution for a pure feasiblity problem or if a gap limit has + // been specified and we return a solution within this limit. In the case + // where we need to return all the feasible solution, this status will only be + // returned if we enumerated all of them; If we stopped before, we will return + // FEASIBLE. + OPTIMAL = 4; +} + +// The response returned by a solver trying to solve a CpModelProto. +// +// TODO(user): support returning multiple solutions. 
Look at the Stubby +// streaming API as we probably wants to get them as they are found. +// Next id: 27 +message CpSolverResponse { + // The status of the solve. + CpSolverStatus status = 1; + + // A feasible solution to the given problem. Depending on the returned status + // it may be optimal or just feasible. This is in one-to-one correspondence + // with a CpModelProto::variables repeated field and list the values of all + // the variables. + repeated int64 solution = 2; + + // Only make sense for an optimization problem. The objective value of the + // returned solution if it is non-empty. If there is no solution, then for a + // minimization problem, this will be an upper-bound of the objective of any + // feasible solution, and a lower-bound for a maximization problem. + double objective_value = 3; + + // Only make sense for an optimization problem. A proven lower-bound on the + // objective for a minimization problem, or a proven upper-bound for a + // maximization problem. + double best_objective_bound = 4; + + // Advanced usage. + // + // If the problem has some variables that are not fixed at the end of the + // search (because of a particular search strategy in the CpModelProto) then + // this will be used instead of filling the solution above. The two fields + // will then contains the lower and upper bounds of each variable as they were + // when the best "solution" was found. + repeated int64 solution_lower_bounds = 18; + repeated int64 solution_upper_bounds = 19; + + // Advanced usage. + // + // If the option fill_tightened_domains_in_response is set, then this field + // will be a copy of the CpModelProto.variables where each domain has been + // reduced using the information the solver was able to derive. Note that this + // is only filled with the info derived during a normal search and we do not + // have any dedicated algorithm to improve it. 
+ // + // If the problem is a feasibility problem, then these bounds will be valid + // for any feasible solution. If the problem is an optimization problem, then + // these bounds will only be valid for any OPTIMAL solutions, it can exclude + // sub-optimal feasible ones. + repeated IntegerVariableProto tightened_variables = 21; + + // A subset of the model "assumptions" field. This will only be filled if the + // status is INFEASIBLE. This subset of assumption will be enough to still get + // an infeasible problem. + // + // This is related to what is called the irreducible inconsistent subsystem or + // IIS. Except one is only concerned by the provided assumptions. There is + // also no guarantee that we return an irreducible (aka minimal subset). + // However, this is based on SAT explanation and there is a good chance it is + // not too large. + // + // If you really want a minimal subset, a possible way to get one is by + // changing your model to minimize the number of assumptions at false, but + // this is likely an harder problem to solve. + // + // TODO(user): Allows for returning multiple core at once. + repeated int32 sufficient_assumptions_for_infeasibility = 23; + + // This will be true iff the solver was asked to find all solutions to a + // satisfiability problem (or all optimal solutions to an optimization + // problem), and it was successful in doing so. + // + // TODO(user): Remove as we also use the OPTIMAL vs FEASIBLE status for that. + bool all_solutions_were_found = 5; + + // Some statistics about the solve. + int64 num_booleans = 10; + int64 num_conflicts = 11; + int64 num_branches = 12; + int64 num_binary_propagations = 13; + int64 num_integer_propagations = 14; + int64 num_restarts = 24; + int64 num_lp_iterations = 25; + + double wall_time = 15; + double user_time = 16; + double deterministic_time = 17; + double primal_integral = 22; + + // Additional information about how the solution was found. 
+  string solution_info = 20;
+
+  // The solve log will be filled if the parameter log_to_response is set to
+  // true.
+  string solve_log = 26;
+}
diff --git a/src/cp_sat_wrapper.cpp b/src/cp_sat_wrapper.cpp
new file mode 100644
index 0000000..a6d107f
--- /dev/null
+++ b/src/cp_sat_wrapper.cpp
@@ -0,0 +1,86 @@
+#include <cassert>
+#include <cstdlib>
+#include <cstring>
+
+#include <ortools/sat/cp_model.h>
+
+namespace sat = operations_research::sat;
+
+// Solve the CpModelProto serialized in `model_buf` and return a malloc'd
+// buffer holding the serialized CpSolverResponse; its size is written to
+// *out_size. Ownership of the buffer passes to the caller (release with free).
+extern "C" unsigned char*
+cp_sat_wrapper_solve(
+    unsigned char* model_buf,
+    size_t model_size,
+    size_t* out_size)
+{
+    sat::CpModelProto model;
+    bool res = model.ParseFromArray(model_buf, model_size);
+    assert(res);
+
+    sat::CpSolverResponse response = sat::Solve(model);
+
+    *out_size = response.ByteSizeLong();
+    unsigned char* out_buf = (unsigned char*) malloc(*out_size);
+    res = response.SerializeToArray(out_buf, *out_size);
+    assert(res);
+
+    return out_buf;
+}
+
+// Same as cp_sat_wrapper_solve, but additionally parses a SatParameters
+// message from `params_buf` and solves the model with those parameters.
+extern "C" unsigned char*
+cp_sat_wrapper_solve_with_parameters(
+    unsigned char* model_buf,
+    size_t model_size,
+    unsigned char* params_buf,
+    size_t params_size,
+    size_t* out_size)
+{
+    sat::CpModelProto model;
+    bool res = model.ParseFromArray(model_buf, model_size);
+    assert(res);
+
+    sat::SatParameters params;
+    // BUG FIX: parse the *parameters* buffer into `params`. Previously this
+    // re-parsed into `model`, leaving `params` at its default values.
+    res = params.ParseFromArray(params_buf, params_size);
+    assert(res);
+
+    sat::CpSolverResponse response = sat::SolveWithParameters(model, params);
+
+    *out_size = response.ByteSizeLong();
+    unsigned char* out_buf = (unsigned char*) malloc(*out_size);
+    res = response.SerializeToArray(out_buf, *out_size);
+    assert(res);
+
+    return out_buf;
+}
+
+// Return model statistics as a malloc'd NUL-terminated string (via strdup).
+extern "C" char*
+cp_sat_wrapper_cp_model_stats(unsigned char* model_buf, size_t model_size) {
+    sat::CpModelProto model;
+    const bool res = model.ParseFromArray(model_buf, model_size);
+    assert(res);
+
+    const std::string stats = sat::CpModelStats(model);
+    return strdup(stats.c_str());
+}
+
+// Return response statistics as a malloc'd NUL-terminated string (via strdup).
+extern "C" char*
+cp_sat_wrapper_cp_solver_response_stats(
+    unsigned char* response_buf,
+    size_t response_size,
+    bool has_objective)
+{
+    sat::CpSolverResponse response;
const bool res = response.ParseFromArray(response_buf, response_size); + assert(res); + + const std::string stats = sat::CpSolverResponseStats(response, has_objective); + return strdup(stats.c_str()); +} diff --git a/src/lib.rs b/src/lib.rs new file mode 100644 index 0000000..6d4e950 --- /dev/null +++ b/src/lib.rs @@ -0,0 +1,94 @@ +use libc::c_char; +use prost::Message; +use std::ffi::CStr; + +pub mod proto { + include!(concat!(env!("OUT_DIR"), "/operations_research.sat.rs")); +} + +extern "C" { + fn cp_sat_wrapper_solve( + model_buf: *const u8, + model_size: usize, + out_size: &mut usize, + ) -> *mut u8; + fn cp_sat_wrapper_solve_with_parameters( + model_buf: *const u8, + model_size: usize, + params_buf: *const u8, + params_size: usize, + out_size: &mut usize, + ) -> *mut u8; + fn cp_sat_wrapper_cp_model_stats(model_buf: *const u8, model_size: usize) -> *mut c_char; + fn cp_sat_wrapper_cp_solver_response_stats( + response_buf: *const u8, + response_size: usize, + has_objective: bool, + ) -> *mut c_char; +} + +pub fn solve(model: &proto::CpModelProto) -> proto::CpSolverResponse { + let mut buf = Vec::default(); + model.encode(&mut buf).unwrap(); + let mut out_size = 0; + let res = unsafe { cp_sat_wrapper_solve(buf.as_ptr(), buf.len(), &mut out_size) }; + let out_slice = unsafe { std::slice::from_raw_parts(res, out_size) }; + let response = proto::CpSolverResponse::decode(out_slice).unwrap(); + unsafe { libc::free(res as _) }; + response +} + +pub fn solve_with_parameters( + model: &proto::CpModelProto, + params: &proto::SatParameters, +) -> proto::CpSolverResponse { + let mut model_buf = Vec::default(); + model.encode(&mut model_buf).unwrap(); + let mut params_buf = Vec::default(); + params.encode(&mut params_buf).unwrap(); + + let mut out_size = 0; + let res = unsafe { + cp_sat_wrapper_solve_with_parameters( + model_buf.as_ptr(), + model_buf.len(), + params_buf.as_ptr(), + params_buf.len(), + &mut out_size, + ) + }; + let out_slice = unsafe { 
std::slice::from_raw_parts(res, out_size) }; + let response = proto::CpSolverResponse::decode(out_slice).unwrap(); + unsafe { libc::free(res as _) }; + response +} + +pub fn cp_model_stats(model: &proto::CpModelProto) -> String { + let mut model_buf = Vec::default(); + model.encode(&mut model_buf).unwrap(); + let char_ptr = unsafe { cp_sat_wrapper_cp_model_stats(model_buf.as_ptr(), model_buf.len()) }; + let res = unsafe { CStr::from_ptr(char_ptr) } + .to_str() + .unwrap() + .to_owned(); + unsafe { libc::free(char_ptr as _) }; + res +} + +pub fn cp_solver_response_stats(response: &proto::CpSolverResponse, has_objective: bool) -> String { + let mut response_buf = Vec::default(); + response.encode(&mut response_buf).unwrap(); + let char_ptr = unsafe { + cp_sat_wrapper_cp_solver_response_stats( + response_buf.as_ptr(), + response_buf.len(), + has_objective, + ) + }; + let res = unsafe { CStr::from_ptr(char_ptr) } + .to_str() + .unwrap() + .to_owned(); + unsafe { libc::free(char_ptr as _) }; + res +} diff --git a/src/sat_parameters.proto b/src/sat_parameters.proto new file mode 100644 index 0000000..480518d --- /dev/null +++ b/src/sat_parameters.proto @@ -0,0 +1,1038 @@ +// Copyright 2010-2021 Google LLC +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +syntax = "proto2"; + +package operations_research.sat; + +option java_package = "com.google.ortools.sat"; +option java_multiple_files = true; + +// Contains the definitions for all the sat algorithm parameters and their +// default values. +// +// NEXT TAG: 188 +message SatParameters { + // In some context, like in a portfolio of search, it makes sense to name a + // given parameters set for logging purpose. + optional string name = 171 [default = ""]; + + // ========================================================================== + // Branching and polarity + // ========================================================================== + + // Variables without activity (i.e. at the beginning of the search) will be + // tried in this preferred order. + enum VariableOrder { + IN_ORDER = 0; // As specified by the problem. + IN_REVERSE_ORDER = 1; + IN_RANDOM_ORDER = 2; + } + optional VariableOrder preferred_variable_order = 1 [default = IN_ORDER]; + + // Specifies the initial polarity (true/false) when the solver branches on a + // variable. This can be modified later by the user, or the phase saving + // heuristic. + // + // Note(user): POLARITY_FALSE is usually a good choice because of the + // "natural" way to express a linear boolean problem. + enum Polarity { + POLARITY_TRUE = 0; + POLARITY_FALSE = 1; + POLARITY_RANDOM = 2; + + // Choose the sign that tends to satisfy the most constraints. This is + // computed using a weighted sum: if a literal l appears in a constraint of + // the form: ... + coeff * l +... <= rhs with positive coefficients and + // rhs, then -sign(l) * coeff / rhs is added to the weight of l.variable(). + POLARITY_WEIGHTED_SIGN = 3; + + // The opposite choice of POLARITY_WEIGHTED_SIGN. 
+ POLARITY_REVERSE_WEIGHTED_SIGN = 4; + } + optional Polarity initial_polarity = 2 [default = POLARITY_FALSE]; + + // If this is true, then the polarity of a variable will be the last value it + // was assigned to, or its default polarity if it was never assigned since the + // call to ResetDecisionHeuristic(). + // + // Actually, we use a newer version where we follow the last value in the + // longest non-conflicting partial assignment in the current phase. + // + // This is called 'literal phase saving'. For details see 'A Lightweight + // Component Caching Scheme for Satisfiability Solvers' K. Pipatsrisawat and + // A.Darwiche, In 10th International Conference on Theory and Applications of + // Satisfiability Testing, 2007. + optional bool use_phase_saving = 44 [default = true]; + + // If non-zero, then we change the polarity heuristic after that many number + // of conflicts in an arithmetically increasing fashion. So x the first time, + // 2 * x the second time, etc... + optional int32 polarity_rephase_increment = 168 [default = 1000]; + + // The proportion of polarity chosen at random. Note that this take + // precedence over the phase saving heuristic. This is different from + // initial_polarity:POLARITY_RANDOM because it will select a new random + // polarity each time the variable is branched upon instead of selecting one + // initially and then always taking this choice. + optional double random_polarity_ratio = 45 [default = 0.0]; + + // A number between 0 and 1 that indicates the proportion of branching + // variables that are selected randomly instead of choosing the first variable + // from the given variable_ordering strategy. + optional double random_branches_ratio = 32 [default = 0.0]; + + // Whether we use the ERWA (Exponential Recency Weighted Average) heuristic as + // described in "Learning Rate Based Branching Heuristic for SAT solvers", + // J.H.Liang, V. Ganesh, P. Poupart, K.Czarnecki, SAT 2016. 
+ optional bool use_erwa_heuristic = 75 [default = false]; + + // The initial value of the variables activity. A non-zero value only make + // sense when use_erwa_heuristic is true. Experiments with a value of 1e-2 + // together with the ERWA heuristic showed slighthly better result than simply + // using zero. The idea is that when the "learning rate" of a variable becomes + // lower than this value, then we prefer to branch on never explored before + // variables. This is not in the ERWA paper. + optional double initial_variables_activity = 76 [default = 0.0]; + + // When this is true, then the variables that appear in any of the reason of + // the variables in a conflict have their activity bumped. This is addition to + // the variables in the conflict, and the one that were used during conflict + // resolution. + optional bool also_bump_variables_in_conflict_reasons = 77 [default = false]; + + // ========================================================================== + // Conflict analysis + // ========================================================================== + + // Do we try to minimize conflicts (greedily) when creating them. + enum ConflictMinimizationAlgorithm { + NONE = 0; + SIMPLE = 1; + RECURSIVE = 2; + EXPERIMENTAL = 3; + } + optional ConflictMinimizationAlgorithm minimization_algorithm = 4 + [default = RECURSIVE]; + + // Whether to expoit the binary clause to minimize learned clauses further. + // This will have an effect only if treat_binary_clauses_separately is true. 
+ enum BinaryMinizationAlgorithm { + NO_BINARY_MINIMIZATION = 0; + BINARY_MINIMIZATION_FIRST = 1; + BINARY_MINIMIZATION_FIRST_WITH_TRANSITIVE_REDUCTION = 4; + BINARY_MINIMIZATION_WITH_REACHABILITY = 2; + EXPERIMENTAL_BINARY_MINIMIZATION = 3; + } + optional BinaryMinizationAlgorithm binary_minimization_algorithm = 34 + [default = BINARY_MINIMIZATION_FIRST]; + + // At a really low cost, during the 1-UIP conflict computation, it is easy to + // detect if some of the involved reasons are subsumed by the current + // conflict. When this is true, such clauses are detached and later removed + // from the problem. + optional bool subsumption_during_conflict_analysis = 56 [default = true]; + + // ========================================================================== + // Clause database management + // ========================================================================== + + // Trigger a cleanup when this number of "deletable" clauses is learned. + optional int32 clause_cleanup_period = 11 [default = 10000]; + + // During a cleanup, we will always keep that number of "deletable" clauses. + // Note that this doesn't include the "protected" clauses. + optional int32 clause_cleanup_target = 13 [default = 10000]; + + // Each time a clause activity is bumped, the clause has a chance to be + // protected during the next cleanup phase. Note that clauses used as a reason + // are always protected. + enum ClauseProtection { + PROTECTION_NONE = 0; // No protection. + PROTECTION_ALWAYS = 1; // Protect all clauses whose activity is bumped. + PROTECTION_LBD = 2; // Only protect clause with a better LBD. + } + optional ClauseProtection clause_cleanup_protection = 58 + [default = PROTECTION_NONE]; + + // All the clauses with a LBD (literal blocks distance) lower or equal to this + // parameters will always be kept. + optional int32 clause_cleanup_lbd_bound = 59 [default = 5]; + + // The clauses that will be kept during a cleanup are the ones that come + // first under this order. 
We always keep or exclude ties together. + enum ClauseOrdering { + // Order clause by decreasing activity, then by increasing LBD. + CLAUSE_ACTIVITY = 0; + // Order clause by increasing LBD, then by decreasing activity. + CLAUSE_LBD = 1; + } + optional ClauseOrdering clause_cleanup_ordering = 60 + [default = CLAUSE_ACTIVITY]; + + // Same as for the clauses, but for the learned pseudo-Boolean constraints. + optional int32 pb_cleanup_increment = 46 [default = 200]; + optional double pb_cleanup_ratio = 47 [default = 0.5]; + + // Parameters for an heuristic similar to the one descibed in "An effective + // learnt clause minimization approach for CDCL Sat Solvers", + // https://www.ijcai.org/proceedings/2017/0098.pdf + // + // For now, we have a somewhat simpler implementation where every x restart we + // spend y decisions on clause minimization. The minimization technique is the + // same as the one used to minimize core in max-sat. We also minimize problem + // clauses and not just the learned clause that we keep forever like in the + // paper. + // + // Changing these parameters or the kind of clause we minimize seems to have + // a big impact on the overall perf on our benchmarks. So this technique seems + // definitely useful, but it is hard to tune properly. + optional int32 minimize_with_propagation_restart_period = 96 [default = 10]; + optional int32 minimize_with_propagation_num_decisions = 97 [default = 1000]; + + // ========================================================================== + // Variable and clause activities + // ========================================================================== + + // Each time a conflict is found, the activities of some variables are + // increased by one. Then, the activity of all variables are multiplied by + // variable_activity_decay. + // + // To implement this efficiently, the activity of all the variables is not + // decayed at each conflict. Instead, the activity increment is multiplied by + // 1 / decay. 
When an activity reach max_variable_activity_value, all the + // activity are multiplied by 1 / max_variable_activity_value. + optional double variable_activity_decay = 15 [default = 0.8]; + optional double max_variable_activity_value = 16 [default = 1e100]; + + // The activity starts at 0.8 and increment by 0.01 every 5000 conflicts until + // 0.95. This "hack" seems to work well and comes from: + // + // Glucose 2.3 in the SAT 2013 Competition - SAT Competition 2013 + // http://edacc4.informatik.uni-ulm.de/SC13/solver-description-download/136 + optional double glucose_max_decay = 22 [default = 0.95]; + optional double glucose_decay_increment = 23 [default = 0.01]; + optional int32 glucose_decay_increment_period = 24 [default = 5000]; + + // Clause activity parameters (same effect as the one on the variables). + optional double clause_activity_decay = 17 [default = 0.999]; + optional double max_clause_activity_value = 18 [default = 1e20]; + + // ========================================================================== + // Restart + // ========================================================================== + + // Restart algorithms. + // + // A reference for the more advanced ones is: + // Gilles Audemard, Laurent Simon, "Refining Restarts Strategies for SAT + // and UNSAT", Principles and Practice of Constraint Programming Lecture + // Notes in Computer Science 2012, pp 118-126 + enum RestartAlgorithm { + NO_RESTART = 0; + + // Just follow a Luby sequence times restart_period. + LUBY_RESTART = 1; + + // Moving average restart based on the decision level of conflicts. + DL_MOVING_AVERAGE_RESTART = 2; + + // Moving average restart based on the LBD of conflicts. + LBD_MOVING_AVERAGE_RESTART = 3; + + // Fixed period restart every restart period. + FIXED_RESTART = 4; + } + + // The restart strategies will change each time the strategy_counter is + // increased. 
The current strategy will simply be the one at index + // strategy_counter modulo the number of strategy. Note that if this list + // includes a NO_RESTART, nothing will change when it is reached because the + // strategy_counter will only increment after a restart. + // + // The idea of switching of search strategy tailored for SAT/UNSAT comes from + // Chanseok Oh with his COMiniSatPS solver, see http://cs.nyu.edu/~chanseok/. + // But more generally, it seems REALLY beneficial to try different strategy. + repeated RestartAlgorithm restart_algorithms = 61; + optional string default_restart_algorithms = 70 + [default = + "LUBY_RESTART,LBD_MOVING_AVERAGE_RESTART,DL_MOVING_AVERAGE_RESTART"]; + + // Restart period for the FIXED_RESTART strategy. This is also the multiplier + // used by the LUBY_RESTART strategy. + optional int32 restart_period = 30 [default = 50]; + + // Size of the window for the moving average restarts. + optional int32 restart_running_window_size = 62 [default = 50]; + + // In the moving average restart algorithms, a restart is triggered if the + // window average times this ratio is greater that the global average. + optional double restart_dl_average_ratio = 63 [default = 1.0]; + optional double restart_lbd_average_ratio = 71 [default = 1.0]; + + // Block a moving restart algorithm if the trail size of the current conflict + // is greater than the multiplier times the moving average of the trail size + // at the previous conflicts. + optional bool use_blocking_restart = 64 [default = false]; + optional int32 blocking_restart_window_size = 65 [default = 5000]; + optional double blocking_restart_multiplier = 66 [default = 1.4]; + + // After each restart, if the number of conflict since the last strategy + // change is greater that this, then we increment a "strategy_counter" that + // can be use to change the search strategy used by the following restarts. 
+ optional int32 num_conflicts_before_strategy_changes = 68 [default = 0]; + + // The parameter num_conflicts_before_strategy_changes is increased by that + // much after each strategy change. + optional double strategy_change_increase_ratio = 69 [default = 0.0]; + + // ========================================================================== + // Limits + // ========================================================================== + + // Maximum time allowed in seconds to solve a problem. + // The counter will starts at the beginning of the Solve() call. + optional double max_time_in_seconds = 36 [default = inf]; + + // Maximum time allowed in deterministic time to solve a problem. + // The deterministic time should be correlated with the real time used by the + // solver, the time unit being as close as possible to a second. + optional double max_deterministic_time = 67 [default = inf]; + + // Maximum number of conflicts allowed to solve a problem. + // + // TODO(user,user): Maybe change the way the conflict limit is enforced? + // currently it is enforced on each independent internal SAT solve, rather + // than on the overall number of conflicts across all solves. So in the + // context of an optimization problem, this is not really usable directly by a + // client. + optional int64 max_number_of_conflicts = 37 + [default = 0x7FFFFFFFFFFFFFFF]; // kint64max + + // Maximum memory allowed for the whole thread containing the solver. The + // solver will abort as soon as it detects that this limit is crossed. As a + // result, this limit is approximative, but usually the solver will not go too + // much over. + optional int64 max_memory_in_mb = 40 [default = 10000]; + + // Stop the search when the gap between the best feasible objective (O) and + // our best objective bound (B) is smaller than a limit. + // The exact definition is: + // - Absolute: abs(O - B) + // - Relative: abs(O - B) / max(1, abs(O)). 
+ // + // Important: The relative gap depends on the objective offset! If you + // artificially shift the objective, you will get widely different value of + // the relative gap. + // + // Note that if the gap is reached, the search status will be OPTIMAL. But + // one can check the best objective bound to see the actual gap. + optional double absolute_gap_limit = 159 [default = 0.0]; + optional double relative_gap_limit = 160 [default = 0.0]; + + // ========================================================================== + // Other parameters + // ========================================================================== + + // If true, the binary clauses are treated separately from the others. This + // should be faster and uses less memory. However it changes the propagation + // order. + optional bool treat_binary_clauses_separately = 33 [default = true]; + + // At the beginning of each solve, the random number generator used in some + // part of the solver is reinitialized to this seed. If you change the random + // seed, the solver may make different choices during the solving process. + // + // For some problems, the running time may vary a lot depending on small + // change in the solving algorithm. Running the solver with different seeds + // enables to have more robust benchmarks when evaluating new features. + optional int32 random_seed = 31 [default = 1]; + + // This is mainly here to test the solver variability. Note that in tests, if + // not explicitly set to false, all 3 options will be set to true so that + // clients do not rely on the solver returning a specific solution if they are + // many equivalent optimal solutions. + optional bool permute_variable_randomly = 178 [default = false]; + optional bool permute_presolve_constraint_order = 179 [default = false]; + optional bool use_absl_random = 180 [default = false]; + + // Whether the solver should log the search progress. By default, it logs to + // LOG(INFO). 
This can be overwritten by the log_destination parameter. + optional bool log_search_progress = 41 [default = false]; + + // Add a prefix to all logs. + optional string log_prefix = 185 [default = ""]; + + // Log to stdout. + optional bool log_to_stdout = 186 [default = true]; + + // Log to response proto. + optional bool log_to_response = 187 [default = false]; + + // Whether to use pseudo-Boolean resolution to analyze a conflict. Note that + // this option only make sense if your problem is modelized using + // pseudo-Boolean constraints. If you only have clauses, this shouldn't change + // anything (except slow the solver down). + optional bool use_pb_resolution = 43 [default = false]; + + // A different algorithm during PB resolution. It minimizes the number of + // calls to ReduceCoefficients() which can be time consuming. However, the + // search space will be different and if the coefficients are large, this may + // lead to integer overflows that could otherwise be prevented. + optional bool minimize_reduction_during_pb_resolution = 48 [default = false]; + + // Whether or not the assumption levels are taken into account during the LBD + // computation. According to the reference below, not counting them improves + // the solver in some situation. Note that this only impact solves under + // assumptions. + // + // Gilles Audemard, Jean-Marie Lagniez, Laurent Simon, "Improving Glucose for + // Incremental SAT Solving with Assumptions: Application to MUS Extraction" + // Theory and Applications of Satisfiability Testing - SAT 2013, Lecture Notes + // in Computer Science Volume 7962, 2013, pp 309-317. 
+ optional bool count_assumption_levels_in_lbd = 49 [default = true]; + + // ========================================================================== + // Presolve + // ========================================================================== + + // During presolve, only try to perform the bounded variable elimination (BVE) + // of a variable x if the number of occurrences of x times the number of + // occurrences of not(x) is not greater than this parameter. + optional int32 presolve_bve_threshold = 54 [default = 500]; + + // During presolve, we apply BVE only if this weight times the number of + // clauses plus the number of clause literals is not increased. + optional int32 presolve_bve_clause_weight = 55 [default = 3]; + + // The maximum "deterministic" time limit to spend in probing. A value of + // zero will disable the probing. + optional double presolve_probing_deterministic_time_limit = 57 + [default = 30.0]; + + // Whether we use an heuristic to detect some basic case of blocked clause + // in the SAT presolve. + optional bool presolve_blocked_clause = 88 [default = true]; + + // Whether or not we use Bounded Variable Addition (BVA) in the presolve. + optional bool presolve_use_bva = 72 [default = true]; + + // Apply Bounded Variable Addition (BVA) if the number of clauses is reduced + // by stricly more than this threshold. The algorithm described in the paper + // uses 0, but quick experiments showed that 1 is a good value. It may not be + // worth it to add a new variable just to remove one clause. + optional int32 presolve_bva_threshold = 73 [default = 1]; + + // In case of large reduction in a presolve iteration, we perform multiple + // presolve iterations. This parameter controls the maximum number of such + // presolve iterations. + optional int32 max_presolve_iterations = 138 [default = 3]; + + // Whether we presolve the cp_model before solving it. + optional bool cp_model_presolve = 86 [default = true]; + + // Advanced usage. 
We have two different postsolve code. The default one + // should be better and it allows for a more powerful presolve, but some + // rarely used features like not fully assigning all variables require the + // other one. + optional bool cp_model_postsolve_with_full_solver = 162 [default = false]; + + // If positive, try to stop just after that many presolve rules have been + // applied. This is mainly useful for debugging presolve. + optional int32 cp_model_max_num_presolve_operations = 151 [default = 0]; + + // How much effort do we spend on probing. 0 disables it completely. + optional int32 cp_model_probing_level = 110 [default = 2]; + + // Whether we also use the sat presolve when cp_model_presolve is true. + optional bool cp_model_use_sat_presolve = 93 [default = true]; + optional bool use_sat_inprocessing = 163 [default = false]; + + // If true, the element constraints are expanded into many + // linear constraints of the form (index == i) => (element[i] == target). + optional bool expand_element_constraints = 140 [default = true]; + + // If true, the automaton constraints are expanded. + optional bool expand_automaton_constraints = 143 [default = true]; + + // If true, the positive table constraints are expanded. + // Note that currently, negative table constraints are always expanded. + optional bool expand_table_constraints = 158 [default = true]; + + // If true, expand all_different constraints that are not permutations. + // Permutations (#Variables = #Values) are always expanded. + optional bool expand_alldiff_constraints = 170 [default = false]; + + // If true, expand the reservoir constraints by creating booleans for all + // possible precedences between event and encoding the constraint. + optional bool expand_reservoir_constraints = 182 [default = true]; + + // If true, it disable all constraint expansion. + // This should only be used to test the presolve of expanded constraints. 
+ optional bool disable_constraint_expansion = 181 [default = false]; + + // During presolve, we use a maximum clique heuristic to merge together + // no-overlap constraints or at most one constraints. This code can be slow, + // so we have a limit in place on the number of explored nodes in the + // underlying graph. The internal limit is an int64, but we use double here to + // simplify manual input. + optional double merge_no_overlap_work_limit = 145 [default = 1e12]; + optional double merge_at_most_one_work_limit = 146 [default = 1e8]; + + // How much substitution (also called free variable aggregation in MIP + // litterature) should we perform at presolve. This currently only concerns + // variable appearing only in linear constraints. For now the value 0 turns it + // off and any positive value performs substitution. + optional int32 presolve_substitution_level = 147 [default = 1]; + + // If true, we will extract from linear constraints, enforcement literals of + // the form "integer variable at bound => simplified constraint". This should + // always be beneficial except that we don't always handle them as efficiently + // as we could for now. This causes problem on manna81.mps (LP relaxation not + // as tight it seems) and on neos-3354841-apure.mps.gz (too many literals + // created this way). + optional bool presolve_extract_integer_enforcement = 174 [default = false]; + + // ========================================================================== + // Max-sat parameters + // ========================================================================== + + // For an optimization problem, whether we follow some hints in order to find + // a better first solution. For a variable with hint, the solver will always + // try to follow the hint. It will revert to the variable_branching default + // otherwise. + optional bool use_optimization_hints = 35 [default = true]; + + // Whether we use a simple heuristic to try to minimize an UNSAT core. 
+ optional bool minimize_core = 50 [default = true];
+
+ // Whether we try to find more independent cores for a given set of
+ // assumptions in the core based max-SAT algorithms.
+ optional bool find_multiple_cores = 84 [default = true];
+
+ // If true, when the max-sat algo finds a core, we compute the minimal number
+ // of literals in the core that need to be true to have a feasible solution.
+ optional bool cover_optimization = 89 [default = true];
+
+ // In what order do we add the assumptions in a core-based max-sat algorithm
+ enum MaxSatAssumptionOrder {
+ DEFAULT_ASSUMPTION_ORDER = 0;
+ ORDER_ASSUMPTION_BY_DEPTH = 1;
+ ORDER_ASSUMPTION_BY_WEIGHT = 2;
+ }
+ optional MaxSatAssumptionOrder max_sat_assumption_order = 51
+ [default = DEFAULT_ASSUMPTION_ORDER];
+
+ // If true, adds the assumption in the reverse order of the one defined by
+ // max_sat_assumption_order.
+ optional bool max_sat_reverse_assumption_order = 52 [default = false];
+
+ // What stratification algorithm we use in the presence of weight.
+ enum MaxSatStratificationAlgorithm {
+ // No stratification of the problem.
+ STRATIFICATION_NONE = 0;
+
+ // Start with literals with the highest weight, and when SAT, add the
+ // literals with the next highest weight and so on.
+ STRATIFICATION_DESCENT = 1;
+
+ // Start with all literals. Each time a core is found with a given minimum
+ // weight, do not consider literals with a lower weight for the next core
+ // computation. If the subproblem is SAT, do like in STRATIFICATION_DESCENT
+ // and just add the literals with the next highest weight.
+ STRATIFICATION_ASCENT = 2;
+ }
+ optional MaxSatStratificationAlgorithm max_sat_stratification = 53
+ [default = STRATIFICATION_DESCENT];
+
+ // ==========================================================================
+ // Constraint programming parameters
+ // ==========================================================================
+
+ // When this is true, then a disjunctive constraint will try to use the
+ // precedence relations between time intervals to propagate their bounds
+ // further. For instance if task A and B are both before C and task A and B
+ // are in disjunction, then we can deduce that task C must start after
+ // duration(A) + duration(B) instead of simply max(duration(A), duration(B)),
+ // provided that the start time for all tasks was currently zero.
+ //
+ // This always results in better propagation, but it is usually slow, so
+ // depending on the problem, turning this off may lead to a faster solution.
+ optional bool use_precedences_in_disjunctive_constraint = 74 [default = true];
+
+ // When this is true, the cumulative constraint is reinforced with overload
+ // checking, i.e., an additional level of reasoning based on energy. This
+ // additional level supplements the default level of reasoning as well as
+ // timetable edge finding.
+ //
+ // This always results in better propagation, but it is usually slow, so
+ // depending on the problem, turning this off may lead to a faster solution.
+ optional bool use_overload_checker_in_cumulative_constraint = 78
+ [default = false];
+
+ // When this is true, the cumulative constraint is reinforced with timetable
+ // edge finding, i.e., an additional level of reasoning based on the
+ // conjunction of energy and mandatory parts. This additional level
+ // supplements the default level of reasoning as well as overload_checker.
+ //
+ // This always results in better propagation, but it is usually slow, so
+ // depending on the problem, turning this off may lead to a faster solution.
+ optional bool use_timetable_edge_finding_in_cumulative_constraint = 79
+ [default = false];
+
+ // When this is true, the cumulative constraint is reinforced with propagators
+ // from the disjunctive constraint to improve the inference on a set of tasks
+ // that are disjunctive at the root of the problem. This additional level
+ // supplements the default level of reasoning.
+ //
+ // Propagators of the cumulative constraint will not be used at all if all the
+ // tasks are disjunctive at root node.
+ //
+ // This always results in better propagation, but it is usually slow, so
+ // depending on the problem, turning this off may lead to a faster solution.
+ optional bool use_disjunctive_constraint_in_cumulative_constraint = 80
+ [default = true];
+
+ // A non-negative level indicating the type of constraints we consider in the
+ // LP relaxation. At level zero, no LP relaxation is used. At level 1, only
+ // the linear constraint and full encoding are added. At level 2, we also add
+ // all the Boolean constraints.
+ optional int32 linearization_level = 90 [default = 1];
+
+ // A non-negative level indicating how much we should try to fully encode
+ // Integer variables as Boolean.
+ optional int32 boolean_encoding_level = 107 [default = 1];
+
+ // The limit on the number of cuts in our cut pool. When this is reached we do
+ // not generate cuts anymore.
+ //
+ // TODO(user): We should probably remove this parameter, and just always
+ // generate cuts but only keep the best n or something.
+ optional int32 max_num_cuts = 91 [default = 10000];
+
+ // For the cuts that can be generated at any level, this controls if we only
+ // try to generate them at the root node.
+ optional bool only_add_cuts_at_level_zero = 92 [default = false];
+
+ // Whether we generate knapsack cuts. Note that in our setting where all
+ // variables are integer and bounded on both sides, such a cut could be applied
+ // to any constraint.
+ optional bool add_knapsack_cuts = 111 [default = false];
+
+ // Whether we generate and add Chvatal-Gomory cuts to the LP at root node.
+ // Note that for now, this is not heavily tuned.
+ optional bool add_cg_cuts = 117 [default = true];
+
+ // Whether we generate MIR cuts at root node.
+ // Note that for now, this is not heavily tuned.
+ optional bool add_mir_cuts = 120 [default = true];
+
+ // Whether we generate Zero-Half cuts at root node.
+ // Note that for now, this is not heavily tuned.
+ optional bool add_zero_half_cuts = 169 [default = true];
+
+ // Whether we generate clique cuts from the binary implication graph. Note
+ // that as the search goes on, this graph will contain new binary clauses
+ // learned by the SAT engine.
+ optional bool add_clique_cuts = 172 [default = true];
+
+ // Cut generator for all diffs can add too many cuts for large all_diff
+ // constraints. This parameter restricts the large all_diff constraints to
+ // have a cut generator.
+ optional int32 max_all_diff_cut_size = 148 [default = 7];
+
+ // For the lin max constraints, generates the cuts described in "Strong
+ // mixed-integer programming formulations for trained neural networks" by Ross
+ // Anderson et al. (https://arxiv.org/pdf/1811.01988.pdf)
+ optional bool add_lin_max_cuts = 152 [default = true];
+
+ // In the integer rounding procedure used for MIR and Gomory cut, the maximum
+ // "scaling" we use (must be positive). The lower this is, the lower the
+ // integer coefficients of the cut will be. Note that cuts generated by lower
+ // values are not necessarily worse than cuts generated by larger values. There
+ // is no strict dominance relationship.
+ //
+ // Setting this to 2 results in the "strong fractional rounding" of Letchford
+ // and Lodi.
+ optional int32 max_integer_rounding_scaling = 119 [default = 600];
+
+ // If true, we start by an empty LP, and only add constraints not satisfied
+ // by the current LP solution batch by batch.
A constraint that is only added
+ // like this is known as a "lazy" constraint in the literature, except that we
+ // currently consider all constraints as lazy here.
+ optional bool add_lp_constraints_lazily = 112 [default = true];
+
+ // While adding constraints, skip the constraints which have orthogonality
+ // less than 'min_orthogonality_for_lp_constraints' with already added
+ // constraints during current call. Orthogonality is defined as 1 -
+ // cosine(vector angle between constraints). A value of zero disables this
+ // feature.
+ optional double min_orthogonality_for_lp_constraints = 115 [default = 0.05];
+
+ // Max number of times we perform cut generation and resolve the LP at level 0.
+ optional int32 max_cut_rounds_at_level_zero = 154 [default = 1];
+
+ // If a constraint/cut in LP is not active for that many consecutive OPTIMAL
+ // solves, remove it from the LP. Note that it might be added again later if
+ // it becomes violated by the current LP solution.
+ optional int32 max_consecutive_inactive_count = 121 [default = 100];
+
+ // These parameters are similar to sat clause management activity parameters.
+ // They are effective only if the number of generated cuts exceeds the storage
+ // limit. Default values are based on a few experiments on miplib instances.
+ optional double cut_max_active_count_value = 155 [default = 1e10];
+ optional double cut_active_count_decay = 156 [default = 0.8];
+
+ // Target number of constraints to remove during cleanup.
+ optional int32 cut_cleanup_target = 157 [default = 1000];
+
+ // Add that many lazy constraints (or cuts) at once in the LP. Note that at
+ // the beginning of the solve, we do add more than this.
+ optional int32 new_constraints_batch_size = 122 [default = 50];
+
+ // The search branching will be used to decide how to branch on unfixed nodes.
+ enum SearchBranching {
+ // Try to fix all literals using the underlying SAT solver's heuristics,
+ // then generate and fix literals until integer variables are fixed.
+ AUTOMATIC_SEARCH = 0;
+
+ // If used then all decisions taken by the solver are made using a fixed
+ // order as specified in the API or in the CpModelProto search_strategy
+ // field.
+ FIXED_SEARCH = 1;
+
+ // If used, the solver will use various generic heuristics in turn.
+ PORTFOLIO_SEARCH = 2;
+
+ // If used, the solver will use heuristics from the LP relaxation. This
+ // exploits the reduced costs of the variables in the relaxation.
+ //
+ // TODO(user): Maybe rename REDUCED_COST_SEARCH?
+ LP_SEARCH = 3;
+
+ // If used, the solver uses the pseudo costs for branching. Pseudo costs
+ // are computed using the historical change in objective bounds when some
+ // decisions are taken.
+ PSEUDO_COST_SEARCH = 4;
+
+ // Mainly exposed here for testing. This quickly tries a lot of randomized
+ // heuristics with a low conflict limit. It usually provides a good first
+ // solution.
+ PORTFOLIO_WITH_QUICK_RESTART_SEARCH = 5;
+
+ // Mainly used internally. This is like FIXED_SEARCH, except we follow the
+ // solution_hint field of the CpModelProto rather than using the information
+ // provided in the search_strategy.
+ HINT_SEARCH = 6;
+ }
+ optional SearchBranching search_branching = 82 [default = AUTOMATIC_SEARCH];
+
+ // Conflict limit used in the phase that exploits the solution hint.
+ optional int32 hint_conflict_limit = 153 [default = 10];
+
+ // If true, the solver tries to repair the solution given in the hint. This
+ // search terminates after the 'hint_conflict_limit' is reached and the solver
+ // switches to regular search. If false, then we do a FIXED_SEARCH using the
+ // hint until the hint_conflict_limit is reached.
+ optional bool repair_hint = 167 [default = false];
+
+ // All the "exploit_*" parameters below work in the same way: when branching
+ // on an IntegerVariable, these parameters affect the value the variable is
+ // branched on. Currently the first heuristic that triggers wins in the order
+ // in which they appear below.
+ //
+ // TODO(user): Maybe do like for the restart algorithm, introduce an enum
+ // and a repeated field that controls the order in which these are applied?
+
+ // If true and the Lp relaxation of the problem has an integer optimal
+ // solution, try to exploit it. Note that since the LP relaxation may not
+ // contain all the constraints, such a solution is not necessarily a solution
+ // of the full problem.
+ optional bool exploit_integer_lp_solution = 94 [default = true];
+
+ // If true and the Lp relaxation of the problem has a solution, try to exploit
+ // it. This is the same as above except in this case the lp solution might not
+ // be an integer solution.
+ optional bool exploit_all_lp_solution = 116 [default = true];
+
+ // When branching on a variable, follow the last best solution value.
+ optional bool exploit_best_solution = 130 [default = false];
+
+ // When branching on a variable, follow the last best relaxation solution
+ // value. We use the relaxation with the tightest bound on the objective as
+ // the best relaxation solution.
+ optional bool exploit_relaxation_solution = 161 [default = false];
+
+ // When branching on a variable that directly affects the objective,
+ // branch on the value that leads to the best objective first.
+ optional bool exploit_objective = 131 [default = true];
+
+ // If set at zero (the default), it is disabled. Otherwise the solver attempts
+ // probing at every 'probing_period' root node. Period of 1 enables probing at
+ // every root node.
+ optional int64 probing_period_at_root = 142 [default = 0];
+
+ // If true, search will continuously probe Boolean variables, and integer
+ // variable bounds.
+ optional bool use_probing_search = 176 [default = false];
+
+ // The solver ignores the pseudo costs of variables with number of recordings
+ // less than this threshold.
+ optional int64 pseudo_cost_reliability_threshold = 123 [default = 100];
+
+ // The default optimization method is a simple "linear scan", each time trying
+ // to find a better solution than the previous one. If this is true, then we
+ // use a core-based approach (like in max-SAT) when we try to increase the
+ // lower bound instead.
+ optional bool optimize_with_core = 83 [default = false];
+
+ // If non-negative, perform a binary search on the objective variable in order
+ // to find a [min, max] interval outside of which the solver proved unsat/sat
+ // under this amount of conflicts. This can quickly reduce the objective
+ // domain on some problems.
+ optional int32 binary_search_num_conflicts = 99 [default = -1];
+
+ // This has no effect if optimize_with_core is false. If true, use a different
+ // core-based algorithm similar to the max-HS algo for max-SAT. This is a
+ // hybrid MIP/CP approach and it uses a MIP solver in addition to the CP/SAT
+ // one. This is also related to the PhD work of tobyodavies@
+ // "Automatic Logic-Based Benders Decomposition with MiniZinc"
+ // http://aaai.org/ocs/index.php/AAAI/AAAI17/paper/view/14489
+ optional bool optimize_with_max_hs = 85 [default = false];
+
+ // Whether we enumerate all solutions of a problem without objective. Note
+ // that setting this to true automatically disables the presolve. This is
+ // because the presolve rules only guarantee the existence of one feasible
+ // solution to the presolved problem.
+ //
+ // TODO(user): Do not disable the presolve and let the user choose what
+ // behavior is best by setting keep_all_feasible_solutions_in_presolve.
+ optional bool enumerate_all_solutions = 87 [default = false];
+
+ // If true, we disable the presolve reductions that remove feasible solutions
+ // from the search space.
Such solutions are usually dominated by a "better"
+ // solution that is kept, but depending on the situation, we might want to
+ // keep all solutions.
+ //
+ // A trivial example is when a variable is unused. If this is true, then the
+ // presolve will not fix it to an arbitrary value and it will stay in the
+ // search space.
+ optional bool keep_all_feasible_solutions_in_presolve = 173 [default = false];
+
+ // If true, add information about the derived variable domains to the
+ // CpSolverResponse. It is an option because it makes the response slightly
+ // bigger and there is a bit more work involved during the postsolve to
+ // construct it, but it should still have a low overhead. See the
+ // tightened_variables field in CpSolverResponse for more details.
+ optional bool fill_tightened_domains_in_response = 132 [default = false];
+
+ // If true, the solver will add a default integer branching strategy to the
+ // already defined search strategy.
+ optional bool instantiate_all_variables = 106 [default = true];
+
+ // If true, then the precedences propagator tries to detect for each variable
+ // if it has a set of "optional incoming arc" for which at least one of them
+ // is present. This is usually useful to have but can be slow on models with
+ // a lot of precedences.
+ optional bool auto_detect_greater_than_at_least_one_of = 95 [default = true];
+
+ // For an optimization problem, stop the solver as soon as we have a solution.
+ optional bool stop_after_first_solution = 98 [default = false];
+
+ // Mainly used when improving the presolver. When true, stops the solver after
+ // the presolve is complete.
+ optional bool stop_after_presolve = 149 [default = false];
+
+ // Specify the number of parallel workers to use during search.
+ // A number <= 1 means no parallelism.
+ // As of 2020-04-10, if you're using SAT via MPSolver (to solve integer
+ // programs) this field is overridden with a value of 8, if the field is not
+ // set *explicitly*.
Thus, always set this field explicitly or via
+ // MPSolver::SetNumThreads().
+ optional int32 num_search_workers = 100 [default = 1];
+
+ // Experimental. If this is true, then we interleave all our major search
+ // strategies and distribute the work amongst num_search_workers.
+ //
+ // The search is deterministic (independently of num_search_workers!), and we
+ // schedule and wait for interleave_batch_size tasks to be completed before
+ // synchronizing and scheduling the next batch of tasks.
+ optional bool interleave_search = 136 [default = false];
+ optional int32 interleave_batch_size = 134 [default = 1];
+
+ // Temporary parameter until the memory usage is more optimized.
+ optional bool reduce_memory_usage_in_interleave_mode = 141 [default = false];
+
+ // Allows objective sharing between workers.
+ optional bool share_objective_bounds = 113 [default = true];
+
+ // Allows sharing of the bounds of modified variables at level 0.
+ optional bool share_level_zero_bounds = 114 [default = true];
+
+ // LNS parameters.
+ optional bool use_lns_only = 101 [default = false];
+ optional bool lns_focus_on_decision_variables = 105 [default = false];
+ optional bool lns_expand_intervals_in_constraint_graph = 184 [default = true];
+
+ // Turns on relaxation induced neighborhood generator.
+ optional bool use_rins_lns = 129 [default = true];
+
+ // Adds a feasibility pump subsolver along with lns subsolvers.
+ optional bool use_feasibility_pump = 164 [default = true];
+
+ // Rounding method to use for feasibility pump.
+ enum FPRoundingMethod {
+ // Rounds to the nearest integer value.
+ NEAREST_INTEGER = 0;
+
+ // Counts the number of linear constraints restricting the variable in the
+ // increasing values (up locks) and decreasing values (down locks). Rounds
+ // the variable in the direction of lesser locks.
+ LOCK_BASED = 1;
+
+ // Similar to lock based rounding except this only considers locks of active
+ // constraints from the last lp solve.
+ ACTIVE_LOCK_BASED = 3;
+
+ // This is an expensive rounding algorithm. We round variables one by one and
+ // propagate the bounds in between. If none of the rounded values fall in
+ // the continuous domain specified by lower and upper bound, we use the
+ // current lower/upper bound (whichever one is closest) instead of rounding
+ // the fractional lp solution value. If both the rounded values are in the
+ // domain, we round to nearest integer.
+ PROPAGATION_ASSISTED = 2;
+ }
+ optional FPRoundingMethod fp_rounding = 165 [default = PROPAGATION_ASSISTED];
+
+ // Turns on a lns worker which solves a relaxed version of the original
+ // problem by removing constraints from the problem in order to get better
+ // bounds.
+ optional bool use_relaxation_lns = 150 [default = false];
+
+ // If true, registers more lns subsolvers with different parameters.
+ optional bool diversify_lns_params = 137 [default = false];
+
+ // Randomize fixed search.
+ optional bool randomize_search = 103 [default = false];
+
+ // Search randomization will collect equivalent 'max valued' variables, and
+ // pick one randomly. For instance, if the variable strategy is CHOOSE_FIRST,
+ // all unassigned variables are equivalent. If the variable strategy is
+ // CHOOSE_LOWEST_MIN, and `lm` is the current lowest min of all unassigned
+ // variables, then the set of max valued variables will be all unassigned
+ // variables where
+ // lm <= variable min <= lm + search_randomization_tolerance
+ optional int64 search_randomization_tolerance = 104 [default = 0];
+
+ // If true, we automatically detect variables whose constraints are always
+ // enforced by the same literal and we mark them as optional. This allows us
+ // to propagate them as if they were present in some situations.
+ optional bool use_optional_variables = 108 [default = true];
+
+ // The solver usually exploits the LP relaxation of a model.
If this option is
+ // true, then whatever is inferred by the LP will be used like a heuristic to
+ // compute EXACT propagation on the IP. So with this option, there are no
+ // numerical imprecision issues.
+ optional bool use_exact_lp_reason = 109 [default = true];
+
+ // If true, the solver attempts to generate more info inside lp propagator by
+ // branching on some variables if certain criteria are met during the search
+ // tree exploration.
+ optional bool use_branching_in_lp = 139 [default = false];
+
+ // This can be beneficial if there are a lot of no-overlap constraints but a
+ // relatively low number of different intervals in the problem. Like 1000
+ // intervals, but 1M intervals in the no-overlap constraints covering them.
+ optional bool use_combined_no_overlap = 133 [default = false];
+
+ // Indicates if the CP-SAT layer should catch Control-C (SIGINT) signals
+ // when calling solve. If set, catching the SIGINT signal will terminate the
+ // search gracefully, as if a time limit was reached.
+ optional bool catch_sigint_signal = 135 [default = true];
+
+ // Stores and exploits "implied-bounds" in the solver. That is, relations of
+ // the form literal => (var >= bound). This is currently used to derive
+ // stronger cuts.
+ optional bool use_implied_bounds = 144 [default = true];
+
+ // Whether we try to do a few degenerate iterations at the end of an LP solve
+ // to minimize the fractionality of the integer variables in the basis. This
+ // helps on some problems, but not so much on others. It also costs a bit of
+ // time to do such a polish step.
+ optional bool polish_lp_solution = 175 [default = false];
+
+ // Temporary flag until the feature is more mature. This converts intervals to
+ // the newer proto format that supports affine start/var/end instead of just
+ // variables. It changes a bit the search and is not always better currently.
+ optional bool convert_intervals = 177 [default = false];
+
+ // Whether we try to automatically detect the symmetries in a model and
+ // exploit them. Currently, at level 1 we detect them in presolve and try
+ // to fix Booleans. At level 2, we also do some form of dynamic symmetry
+ // breaking during search.
+ optional int32 symmetry_level = 183 [default = 2];
+
+ // ==========================================================================
+ // MIP -> CP-SAT (i.e. IP with integer coeff) conversion parameters that are
+ // used by our automatic "scaling" algorithm.
+ //
+ // Note that it is hard to do a meaningful conversion automatically and if
+ // you have a model with continuous variables, it is best if you scale the
+ // domain of the variable yourself so that you have a relevant precision for
+ // the application at hand. Same for the coefficients and constraint bounds.
+ // ==========================================================================
+
+ // We need to bound the maximum magnitude of the variables for CP-SAT, and
+ // that is the bound we use. If the MIP model expects larger variable values
+ // in the solution, then the converted model will likely not be relevant.
+ optional double mip_max_bound = 124 [default = 1e7];
+
+ // All continuous variables of the problem will be multiplied by this factor.
+ // By default, we don't do any variable scaling and rely on the MIP model to
+ // specify continuous variable domains with the wanted precision.
+ optional double mip_var_scaling = 125 [default = 1.0];
+
+ // If true, some continuous variables might be automatically scaled. For now,
+ // this is only the case where we detect that a variable is actually an
+ // integer multiple of a constant. For instance, variables of the form k * 0.5
+ // are quite frequent, and if we detect this, we will scale such a variable's
+ // domain by 2 to make it implied integer.
+ optional bool mip_automatically_scale_variables = 166 [default = true];
+
+ // When scaling constraints with double coefficients to integer coefficients,
+ // we will multiply by a power of 2 and round the coefficients. We will choose
+ // the lowest power such that we have no potential overflow and the worst case
+ // constraint activity error does not exceed this threshold relative to the
+ // constraint bounds.
+ //
+ // We also use this to decide by how much we relax the constraint bounds so
+ // that we can have a feasible integer solution of constraints involving
+ // continuous variables. This is required for instance when you have an == rhs
+ // constraint as in many situations you cannot have a perfect equality with
+ // integer variables and coefficients.
+ optional double mip_wanted_precision = 126 [default = 1e-6];
+
+ // To avoid integer overflow, we always force the maximum possible constraint
+ // activity (and objective value) according to the initial variable domain to
+ // be smaller than 2 to this given power. Because of this, we cannot always
+ // reach the "mip_wanted_precision" parameter above.
+ //
+ // This can go as high as 62, but some internal algorithms currently abort
+ // early if they might run into integer overflow, so it is better to keep it
+ // a bit lower than this.
+ optional int32 mip_max_activity_exponent = 127 [default = 53];
+
+ // As explained in mip_wanted_precision and mip_max_activity_exponent, we
+ // cannot always reach the wanted precision during scaling. We use this
+ // threshold to emphasize in the logs when the precision seems bad.
+ optional double mip_check_precision = 128 [default = 1e-4];
+}