Idea: BledDest

**Tutorial**

Tutorial is loading...

**Solution (adedalic)**

```
import kotlin.math.abs

// Cost between column i-1 and column i: |a1 - a2| + |b1 - b2|.
fun sum(a1: Int, a2: Int, b1: Int, b2: Int) = abs(a1 - a2) + abs(b1 - b2)

fun main() {
    val tests = readLine()!!.toInt()
    for (t in 0 until tests) {
        val n = readLine()!!.toInt()
        val a = readLine()!!.split(' ').map(String::toInt).toIntArray()
        val b = readLine()!!.split(' ').map(String::toInt).toIntArray()
        var total = 0L
        for (i in 1 until n) {
            // Greedily decide whether to swap a[i] and b[i]: keep whichever
            // orientation is cheaper relative to the (already fixed) column i-1.
            val keepCost = sum(a[i - 1], a[i], b[i - 1], b[i])
            val swapCost = sum(a[i - 1], b[i], b[i - 1], a[i])
            if (keepCost > swapCost) {
                val tmp = a[i]
                a[i] = b[i]
                b[i] = tmp
            }
            total += sum(a[i - 1], a[i], b[i - 1], b[i])
        }
        println(total)
    }
}
```

Idea: adedalic

**Tutorial**

Tutorial is loading...

**Solution (adedalic)**

```
fun main() {
    // n is read to consume the line, but the values alone determine the output.
    val n = readLine()!!.toInt()
    val values = readLine()!!.split(' ').map { it.toInt() }
    for (v in values) {
        // Try every combination of +1 steps (adds) then *2 steps (muls) modulo
        // 2^15 = 32768; 15 moves of either kind always suffice, so 20 is a
        // safe initial upper bound.
        var best = 20
        for (adds in 0..15) {
            for (muls in 0..15) {
                if (((v + adds) shl muls) % 32768 == 0) {
                    best = minOf(best, adds + muls)
                }
            }
        }
        print("$best ")
    }
}
```

Idea: vovuh

**Tutorial**

Tutorial is loading...

**Solution 1 (vovuh)**

```
#include <bits/stdc++.h>
using namespace std;
// 1661C. Trees must reach a common height; on odd days a watering adds +1,
// on even days +2, at most one tree per day. Minimize the last day used.
int main() {
#ifdef _DEBUG
freopen("input.txt", "r", stdin);
// freopen("output.txt", "w", stdout);
#endif
int tc;
scanf("%d", &tc);
while (tc--) {
int n;
scanf("%d", &n);
vector<int> h(n);
for (auto &it : h) {
scanf("%d", &it);
}
// Only the current maximum and max+1 need to be tried as the target height
// (see the discussion below: higher targets only add work).
int mx = *max_element(h.begin(), h.end());
long long ans = 1e18;
for (auto need : {mx, mx + 1}) {
// Binary search on the number of days `mid`; feasibility is monotone in mid.
long long l = 0, r = 1e18;
long long res = -1;
while (l <= r) {
long long mid = (l + r) >> 1;
// Among mid days: cnt1 odd days (+1 waterings), cnt2 even days (+2 waterings).
long long cnt1 = (mid + 1) / 2, cnt2 = mid - cnt1;
long long need1 = 0;
for (auto ch : h) {
long long cur = (need - ch) / 2; // +2 waterings this tree would like
if ((need - ch) % 2 == 1) {
++need1; // odd remainder forces one +1 watering
}
if (cnt2 >= cur) {
cnt2 -= cur;
} else {
// Not enough +2 days left: cover the deficit with pairs of +1 waterings.
cur -= cnt2;
cnt2 = 0;
need1 += cur * 2;
}
}
// Feasible iff the +1 waterings fit into the available odd days.
if (need1 <= cnt1) {
res = mid;
r = mid - 1;
} else {
l = mid + 1;
}
}
ans = min(ans, res);
}
printf("%lld\n", ans);
}
return 0;
}
```

**Solution 2 (awoo)**

```
#include <bits/stdc++.h>
using namespace std;

int main() {
    int tc;
    scanf("%d", &tc);
    while (tc--) {
        int n;
        scanf("%d", &n);
        vector<int> a(n);
        for (int i = 0; i < n; i++) scanf("%d", &a[i]);
        long long ans = (long long)1e18;
        int mx = *max_element(a.begin(), a.end());
        // Only the current maximum and max+1 are candidate final heights.
        for (int x : {mx, mx + 1}) {
            long long ones = 0, twos = 0; // required +1 moves and +2 moves
            for (int i = 0; i < n; i++) {
                int deficit = x - a[i];
                twos += deficit / 2;
                ones += deficit % 2;
            }
            // One +2 move can be traded for two +1 moves; `dif` such trades
            // rebalance the counts towards the 1:1 odd/even day ratio.
            long long dif = max(0ll, twos - ones - 1) / 3;
            ones += dif * 2;
            twos -= dif;
            ans = min(ans, max(ones * 2 - 1, twos * 2));
            // A single additional trade may still improve the boundary case.
            if (twos > 0) {
                ones += 2;
                --twos;
                ans = min(ans, max(ones * 2 - 1, twos * 2));
            }
        }
        printf("%lld\n", ans);
    }
}
```

Idea: vovuh

**Tutorial**

Tutorial is loading...

**Solution (vovuh)**

```
#include <bits/stdc++.h>
using namespace std;
// 1661D. Reach b (starting from zeros) by adding arithmetic progressions
// 1..k over windows of length k. Greedy right to left: a progression whose
// window ends at the current index adds the maximal amount (k) there.
int main() {
#ifdef _DEBUG
freopen("input.txt", "r", stdin);
// freopen("output.txt", "w", stdout);
#endif
int n, k;
scanf("%d %d", &n, &k);
vector<long long> b(n);
for (auto &it : b) {
scanf("%lld", &it);
}
// closed[i]: number of progressions whose sloped part stops before index i.
vector<long long> closed(n);
// sum: total value contributed at the current index by chosen progressions;
// cnt: progressions whose contribution still shrinks by 1 per step leftwards.
long long sum = 0, cnt = 0, ans = 0;
for (int i = n - 1; i >= 0; --i) {
sum -= cnt;
cnt -= closed[i];
b[i] -= sum;  // reduce the requirement by what is already covered here
if (b[i] <= 0) {
continue;
}
// Near the left border a window ending at i has only i+1 cells.
int el = min(i + 1, k);
long long need = (b[i] + el - 1) / el;  // ceil(b[i] / el) progressions
sum += need * el;
cnt += need;
ans += need;
if (i - el >= 0) {
closed[i - el] += need;
}
}
printf("%lld\n", ans);
return 0;
}
```

Idea: BledDest

**Tutorial**

Tutorial is loading...

**Solution (awoo)**

```
#include <bits/stdc++.h>
using namespace std;
#define forn(i, n) for(int i = 0; i < int(n); i++)
// 1661E. 3 x n grid of '1' (free) / '0' (blocked); each query asks for the
// number of connected components of free cells within columns [l, r].
int main(){
cin.tie(0);
iostream::sync_with_stdio(false);
int n;
cin >> n;
vector<string> s(3);
forn(i, 3) cin >> s[i];
// pr[i]: number of free cells in the first i columns.
vector<int> pr(n + 1);
forn(i, n){
pr[i + 1] += pr[i];
forn(j, 3) pr[i + 1] += (s[j][i] == '1');
}
// DSU over all 3*n cells; cell (row i, column j) has id i * n + j.
vector<int> p(3 * n), rk(3 * n, 1);
iota(p.begin(), p.end(), 0);
function<int(int)> getp;
getp = [&](int a) -> int {
return a == p[a] ? a : p[a] = getp(p[a]);
};
// unite returns true iff the edge joins two previously distinct components,
// i.e. the edge belongs to a spanning forest of the whole grid.
auto unite = [&](int a, int b) -> bool {
a = getp(a), b = getp(b);
if (a == b) return false;
if (rk[a] < rk[b]) swap(a, b);
p[b] = a;
rk[a] += rk[b];
return true;
};
// prve[j]: prefix count of vertical spanning-forest edges up to column j;
// prhe[j]: prefix count of horizontal spanning-forest edges left of column j.
vector<int> prhe(n + 1), prve(n + 1);
forn(j, n){
forn(i, 2) if (s[i][j] == '1' && s[i + 1][j] == '1' && unite(i * n + j, (i + 1) * n + j))
++prve[j + 1];
forn(i, 3) if (j > 0 && s[i][j] == '1' && s[i][j - 1] == '1' && unite(i * n + j, i * n + (j - 1)))
++prhe[j];
}
forn(i, n) prve[i + 1] += prve[i];
forn(i, n) prhe[i + 1] += prhe[i];
// nxt[i]: length of the run of "101" columns starting at column i.
vector<int> nxt(n + 1, 0);
for (int i = n - 1; i >= 0; --i)
nxt[i] = (s[0][i] == '1' && s[1][i] == '0' && s[2][i] == '1' ? (nxt[i + 1] + 1) : 0);
int m;
cin >> m;
forn(_, m){
int l, r;
cin >> l >> r;
--l, --r;
// Skip the leading run of "101" columns: inside such a run the global
// spanning forest may connect top and bottom rows only via columns
// outside [l, r], which would corrupt the cells-minus-edges count.
int non101 = l + nxt[l];
if (non101 > r){
cout << "2\n";  // the whole range is "101" columns: top and bottom rows
continue;
}
// components = free cells - spanning-forest edges, over [non101, r].
int tot = pr[r + 1] - pr[non101];
int in = (prve[r + 1] - prve[non101]) + (prhe[r] - prhe[non101]);
int res = tot - in;
// Account for the skipped "101" prefix: its two row-strips either merge
// into column non101 or stay as extra components.
if (non101 != l){
if (s[0][non101] == '1' && s[1][non101] == '1' && s[2][non101] == '1');  // "111": both strips merge, nothing extra
else if (s[0][non101] == '0' && s[2][non101] == '0') res += 2;  // neither strip attaches
else ++res;  // exactly one strip attaches
}
cout << res << "\n";
}
return 0;
}
```

Idea: vovuh

**Tutorial**

Tutorial is loading...

**Solution (BledDest)**

```
#include <bits/stdc++.h>
using namespace std;
// n: number of teleporters; lens: gap lengths between consecutive teleporter
// coordinates (the first gap starts from position 0). Filled in main().
int n;
vector<int> lens;
// Square of x, widened to 64 bits before multiplying to avoid int overflow.
long long sqr(int x)
{
return x * 1ll * x;
}
// Cost of a gap of length `len` cut into `k` consecutive pieces as evenly as
// possible: the sum of squared piece lengths (`len % k` pieces are one longer).
long long eval_split(int len, int k)
{
	long long shorter = len / k;
	long long longer = shorter + 1;
	long long longerCount = len % k;
	return shorter * shorter * (k - longerCount) + longer * longer * longerCount;
}
// For one gap of length `len`, keep adding split points while the marginal
// cost reduction of the next split is >= `bound`. Returns (number of extra
// splits taken, resulting cost of this gap).
pair<int, long long> eval_segment(int len, long long bound)
{
// only take options with value >= bound
// The smallest positive marginal gain is 2, so bound <= 2 means every split
// is taken: len - 1 splits, len unit pieces of total cost len. A gap of
// length 1 cannot be split at all and also costs exactly 1.
if(bound <= 2 || len == 1)
return make_pair(len - 1, len);
// Gains eval_split(len, k) - eval_split(len, k + 1) are non-increasing in k
// (convexity of eval_split, proven in the comments below), so the largest k
// whose gain is still >= bound can be found by binary search.
int lf = 0;
int rg = len - 1;
while(rg - lf > 1)
{
int mid = (lf + rg) / 2;
if(eval_split(len, mid) - eval_split(len, mid + 1) >= bound)
lf = mid;
else
rg = mid;
}
return make_pair(lf, eval_split(len, lf + 1));
}
// Apply eval_segment to every gap and accumulate the totals.
// Returns (total extra teleporters placed, total cost) for threshold `bound`.
pair<int, long long> eval_full(long long bound)
{
	int taken = 0;
	long long cost = 0;
	for (int len : lens)
	{
		const pair<int, long long> part = eval_segment(len, bound);
		taken += part.first;
		cost += part.second;
	}
	return make_pair(taken, cost);
}
int main()
{
scanf("%d", &n);
// Convert the teleporter coordinates into consecutive gap lengths.
int pr = 0;
for(int i = 0; i < n; i++)
{
int x;
scanf("%d", &x);
lens.push_back(x - pr);
pr = x;
}
long long w;
scanf("%lld", &w);
// Binary search on the marginal-gain threshold: the total cost of taking
// every split with gain >= bound is monotone in bound.
long long lf = 0ll;
long long rg = (long long)(1e18) + 43;
if(eval_full(rg).second <= w)
{
// Even with no extra teleporters the energy limit is already satisfied.
cout << 0 << endl;
return 0;
}
while(rg - lf > 1)
{
long long mid = (lf + rg) / 2;
pair<int, long long> res = eval_full(mid);
if(res.second <= w)
lf = mid;
else
rg = mid;
}
pair<int, long long> resL = eval_full(lf);
pair<int, long long> resR = eval_full(rg);
assert(resL.second <= w && resR.second > w);
// All splits taken at threshold lf but not at rg share the same marginal
// gain `change`; keep only as many of them as the budget w requires.
long long change = (resR.second - resL.second) / (resR.first - resL.first);
cout << resL.first + (w - resL.second) / change << endl;
}
```

If you are/were getting a

WA/RE verdict on problems from this contest, you can get the smallest possible counter example for your submission on cfstress.com. To do that, click on the relevant problem's link below, add your submission ID, and edit the table (or edit compressed parameters) to increase/decrease the constraints. I've also added 2 new features: Near real-time

log streaming and Compressed Parameter editing without using tables. If you are not able to find a counter example even after changing the parameters, reply to this thread (only till the next 7 days), with links to your

submission and ticket(s). My solution to 1661E - Narrow Components using persistent DSU and segment tree: 153317120.

For each node of segment tree I store a DSU containing nodes and edges in its range [Lx, Rx). When querying the tree, first I make save point, then I merge all ranges using edges between them and print answer, then I rollback updates (erase added edges in query).

Because of the rollback function, DSU operations take $$$O(\log n)$$$ (there's no path compression).

Total time complexity: $$$O(H \cdot (n+q) \log^2 n)$$$

Total space complexity: $$$O(H \cdot n \log n)$$$

Where H is height of matrix. This solution will work for H > 3.

My English is too bad to explain the correctness of $$$f(x, k) - f(x, k + 1) \ge f(x, k + 1) - f(x, k + 2)$$$ in problem F.

But I have an ugly figure to prove it.

Thanks to Potassium to translate my proof into English as following:

Consider the case when $$$x = 5$$$, we draw a matrix as follows. When $$$i > 1$$$, all numbers in the $$$i$$$-th row from the bottom is $$$\frac{1}{i} - \frac{1}{(i-1)}$$$. For every square that touches the bottom edge, the sum of numbers inside that square must be $$$1$$$. This is because the sum of any square of length $$$n$$$ must be $$$((\frac{1}{n} - \frac{1}{(n-1)}) + (\frac{1}{(n-1)} - \frac{1}{(n-2)}) \ldots ) \times n = 1$$$. This means that if we draw $$$k$$$ squares to partition and cover the bottom edge, the sum of all numbers inside these squares must be $$$k$$$. Notice that the sum of areas of these squares will correspond to $$$f(x, k)$$$. We define the "optimal canonical partition" of $$$f(x, k)$$$ as the one which is obtained by removing the numbers starting from the top row and from the leftmost column. It is easy to see that we will visit the optimal canonical partition of $$$f(x, 1)$$$, $$$f(x, 2)$$$, $$$\ldots$$$, $$$f(x, x)$$$ in this order. In particular, $$$f(x, k)$$$ is visited when the sum of all remaining numbers is equal to $$$k$$$. The value of $$$f(x, k) - f(x, k + 1)$$$ is the number of numbers we are removing to obtain the optimal canonical partition of $$$f(x, k + 1)$$$ starting from the optimal canonical partition of $$$f(x, k)$$$. As the numbers we are removing are non-increasing, the number of numbers we are removing between each consecutive optimal canonical partition can only decrease. This proves that $$$f(x, k) - f(x, k + 1) \ge f(x, k + 1) - f(x, k + 2)$$$ as desired.

How would you have time to learn English you have 5.9k problems solved on Codeforces bro

I have a proof for a generalized version of this problem, where you can only choose points in a given set of coordinates instead of all integers. Note that this problem is equivalent to a classic problem: given an array of $$$n$$$ positive numbers, partition it into $$$k$$$ segments to minimize square sum of the total value of each segment.

The idea is to take a solution of both $$$k-1$$$ and $$$k+1$$$, called $$$A$$$ and $$$B$$$, then find two solutions of $$$k$$$, whose sum is at most $$$A+B$$$, then twice the less one is at most $$$A+B$$$, so we have $$$2f(k)\le f(k-1)+f(k+1)$$$.

We take the partition points of $$$A$$$ and $$$B$$$, and merge them into a sorted sequence, like $$$ababbabb$$$. We can always partition this sequence into two, called $$$S$$$ and $$$T$$$, where the number of $$$b$$$s is equal to the number of $$$a$$$s plus $$$1$$$ in both $$$S$$$ and $$$T$$$, $$$S$$$ ends in $$$b$$$, and $$$T$$$ starts in $$$b$$$.

We then flip $$$T$$$ ($$$a$$$ becomes $$$b$$$ and $$$b$$$ becomes $$$a$$$) so the numbers of $$$a$$$s and $$$b$$$s become the same, that we have two solutions of $$$k$$$. You can see that only two segments have changed, and it's like $$$abba$$$ becomes $$$abab$$$, so obviously they have less sum.

For example, $$$x=10$$$, and we have two solutions $$$[3,7]$$$ and $$$[2,4,6,8]$$$, we merge them into $$$[2_b,3_a,4_b,6_b,7_a,8_b]$$$, then we flip the last $$$3$$$ elements, thus we have $$$[2_b,3_a,4_b,6_a,7_b,8_a]$$$, the only change is that $$$3_a,4_b,6_b,7_a$$$ becomes $$$3_a,4_b,6_a,7_b$$$.

It seems that the total length of new set of $$$a$$$ or $$$b$$$ $$$\not= x$$$. Did I misunderstand something?

Are the contests getting tougher these days? every contest seems tougher than the last one

Just cope harder.

It's true

There is an $$$O(n + q)$$$ solution to E that involves Euler's formula. As Euler's formula for planar graphs states

$$$cc = v - e + f - 1$$$

$$$v$$$ is vertex count.

$$$e$$$ is edge count.

$$$f$$$ is face count.

$$$cc$$$ is the number of connected components.

Here we are solving for $$$cc$$$; $$$v$$$ is the number of white nodes; $$$e$$$ is the number of pairs of white nodes that are adjacent; $$$f$$$ is the number of "blobs" that are composed completely of white nodes and have no white nodes inside of them.

a couple examples:

is 1 face.

is two faces.

is one face.

So we can use prefix sums to maintain $$$v$$$, $$$e$$$ (with two arrays, one for vertical and one for horizontal), and one for faces of $$$f$$$ that are a $$$2x2$$$ block of $$$1$$$'s. Then for each of the faces that are in the form of

you also use an array but you also have to make sure not to count an unfinished face of such. It is also important to not overcount

as three faces.

Also, you have to count the area outside the queried region as $$$1$$$ face.

With this in mind you can use a few prefix sum arrays and some other arrays for the height 3 faces to solve this problem.

My terrible implementation of this idea is here.

Yeah, I also thought of this solution, cause a similar problem appeared in 2017 APIO, it was called Rainbow. But I thought that our graph might not be connected. So I solved it differently.

I made a segment tree that maintains answer and for cells in the rightmost and leftmost column, which component they belong to. It was slow, but using some optimizations it passed. Here is the code.

Yeah, I had APIO Rainbow in mind when I first implemented this. The only difference between this and Rainbow is the special case of faces height 3. Luckily, 3 is small enough that you can still just do casework and get away with it.

in problem B I understand why we are checking all 15 combinations of 2's power. but can not understand the reason behind taking 15 possible additions. why cant the answer be in lets say (v+20) and then some 2's power in 15 range.

can someone explain? why we are adding 0 to 15 in cntADD

edit: ok, I think I get it now. There is no way (v+20) can be an answer, because at most 15 steps are ever required; that is why, even when we add, we never need more than 15 moves. Thus the 0 to 15 range.

Because v+20 uses 20 moves to get from v just by additions, but you can solve for v in 15 moves by 15 multiplications. So there is no need to use more than 15 moves of any kind.

Can somebody explain why max+1 is better than max in some cases?

Check 1 1 1 1 1 1 2: the answer for target 2 is 11, for target 3 it is 9. Anything > max+1 is not relevant, as f(max+2) = f(max) + 3n/2 and similarly f(max+3) = f(max+1) + 3n/2.

5 5 5 5 6 Try this test case if you try to make all the values as 6 you will get ans as 7 but if you try to make them 7 you will get answer 6.

I have another solution for E using Segment Tree + inclusion-exclusion:

We can build a segment tree such that each node carries 3 pieces of information ($$$L$$$ : the left border of the segment, $$$R$$$: the right border of the segment, $$$CC$$$: the number of connected components in that segment)

When merging two nodes, we can simply add their corresponding $$$CC$$$ values, but then comes the problem of overcounting.

Some CCs in the resulting segment are counted more than once!

We can then use the inclusion-exclusion principle to count those common CCs.

We have at most 3 edges in all rows between the corresponding cells of two columns (The rightmost column in the left node and the leftmost column in the right node).

Every edge simply adds 1 to the number of common CCs, but then some CCs are over-counted (those shared by two edges) we need to subtract those now. and then add CCs shared by 3 edges.

Isn't it too complicated: the solution for A? We could use the following simple strategy-

Whoa, problem D's solution is so clever to resolve it with O(n) time complexity.

MemeThanks for the round!

Can anyone please explain the segment tree/lazy propagation approach in problem D?

In my approach, I have used segment tree and lazy propagation on the difference array of $$$a$$$. In case you don't know about difference array, you can read about it here. Basically, in a difference array (say $$$diff$$$) of an array $$$a$$$, $$$diff_i$$$ = $$$a_i-a_{i-1} \forall i>0$$$ and $$$diff_0 = a_0$$$. Reporting any value $$$a_i$$$ through the difference array can be done by calculating the prefix sum till $$$i$$$, i.e. $$$diff_0+diff_1+..+diff_i$$$. It is a useful tool for range queries. For example, for adding $$$x$$$ to every element of $$$a$$$ from $$$i$$$ to $$$j$$$ using a difference array, just add $$$x$$$ to $$$diff_i$$$ and subtract $$$x$$$ from $$$diff_{j+1}$$$ and the job is done. Also, adding $$$1$$$ to $$$a_i,$$$ $$$2$$$ to $$$a_{i+1}....k$$$ to $$$a_{i+k-1}$$$ is equivalent to adding $$$1$$$ to all of $$$diff_i,…,diff_{i+k-1}$$$ and subtracting $$$k$$$ from $$$diff_{i+k}$$$, and this is exactly what we’ll be using in this problem.

Similar to the editorial, we will be iterating from right to left, from $$$i = n-1$$$ to $$$i=k$$$, and for every $$$i$$$, we will make $$$a_i \geq b_i$$$ by adding progressions that end at $$$i$$$ (these are the most optimal progressions to add for increasing $$$a_i$$$ as they add $$$k$$$ to $$$a_i$$$). We calculate $$$a_i$$$ at the beginning of each iteration using prefix sum over the difference array, by range sum query (segment tree). Then, we calculate $$$need$$$ (same as editorial), $$$need = \left\lceil \frac{b_i-a_i}{k} \right\rceil$$$. Now, we have to add $$$need$$$ progressions from $$$i-k+1$$$ to $$$i$$$, which is equivalent to adding $$$need$$$ to every element from $$$diff_{i-k+1}..diff_i$$$ and subtracting $$$need \cdot k$$$ from $$$diff_{i+1}$$$. We do this range update using lazy propagation.

Finally, we’ll only be left with elements $$$0$$$ to $$$k-1$$$ and dealing with them is fairly simple, for these leftmost elements, the $$$i$$$ th element can only increase by $$$i+1$$$ in one progression, so we just find $$$max_{0\leq i\leq k-1} \left\lceil \frac{b_i-a_i}{i+1} \right\rceil$$$, i.e. the max $$$need$$$ for the first $$$k$$$ elements and add it to the answer.

You can look at my submission 153552061 for the implementation. I hope I was able to explain everything. Let me know if something is still unclear.

Should we subtracting $$$k$$$ from $$$diff_{i+k}$$$ instead?

Right yeah sorry, it should be $$$k$$$. I've updated it now. Thanks.

That's because we construct the answer backwards so the element i+1 won't be revisited again

Here is my clumsy proof for 1661C - Water the Trees claim that max or max + 1 is enough. I just tell how to transform filling of height (h + 2) into (h) with removal or replacement of moves.

SpoilerFirst of all, I want to note n >= 2 because n = 1 is obvious. Now, for each tree we need to remove moves with sum 2. For each tree do the following: if there is +2 move then remove it, otherwise all moves are +1 so there are at least 2 of those, and we can remove 2. The last move we do is either +2 or +1 so if we remove last one -> we got better time. The idea is to ensure we remove both: move +2 and move +1 to decrease the time for sure. This brings two bad cases:

Details:

If we only removed +1 this means there is no +2 at all, but this means we have at least n * 2 of +1 moves (because h + 2 >= max + 2), and because n >= 2 this means we have at least 4 of +1 moves. Then for at least one tree we can replace two +1 into one +2 and the time will be better, so it wasn't optimal in the first place. (time of last +1 is decreased, and appearance of +2 could only set time to 2, but this to be the case you have to leave with one or zero of +1 moves, and this is impossible.)

If we only removed +2 there are two cases:

In problem A I coded a solution just leaving the smallest numbers on $$$a$$$, just if $$$b_i > a_i$$$ then swap and it worked

Does anyone know why this works? I don't lol

If someone can show for arbitrary real numbers a, b, c, d, we have a <= b && c <= d implies |a - c| + |b - d| <= |a - d| + |b - c|, then we can prove your solution is correct: just imagine an optimal solution and change it to one that satisfies

`forall i in [1..n], a[i] <= b[i]`

from left to right without increasing the sum. I want to mention two things.

In explanation of 1661D - Progressions Covering nothing said why this will work. And proof is simple. The only thing which can affect the last element is the last progression. And we can use it as much as required. Then, remove it from consideration and repeat.

In explanation of 1661F - Teleporters nothing said why we talk about differences. And here is my explanation. Imagine you decided how many portals you would need to insert, it will cost $$$f(x, k)$$$. But we can think of it in a new way: there is list of "upgrades". Each upgrade $$$i$$$ costs $$$f(x, i - 1) - f(x, i)$$$. So first upgrade costs $$$f(x, 0) - f(x, 1)$$$. Next upgrade costs $$$f(x, 1) - f(x, 2)$$$ and so on. So if we do 3 upgrades, we have to pay $$$f(x, 0) - f(x, 1) + f(x, 1) - f(x, 2) + f(x, 2) - f(x, 3) = f(x, 0) - f(x, 3) =$$$ how much we reduced cost from the initial cost. So we can rephrase question into what minimum number of upgrades we need to have their sum cost $$$\geq$$$ difference how much we exceed the limit. Theoretical best way to do this is to pick most valuable upgrades. The only problem is... if we want to take upgrade $$$f(x, i) - f(x, i + 1)$$$ we also had to take all upgrades $$$f(x, j) - f(x, j + 1)$$$ where $$$j < i$$$. But the property $$$f(x, k) - f(x, k + 1) \geq f(x, k + 1) - f(x, k + 2)$$$ exactly implies that if we pick upgrade later, it means we already picked upgrade before, because it has greater or equal cost! So this property allows us to pick most valuable upgrades without violation of upgrades order.

I see Problem B has DP tag. How can this be solved by dp when there can be cycle?

submission

Can anyone explain the BFS solution for problem B ? I have tried to understand the approach from SSRS submission but not getting the logic behind it.

fewest steps to reach 0 from x is shortest path from x to 0 in directed graph with edges (u, v) where u can be transformed into v. Then it's the same as path from 0 to x over "inverse" edges: if (u, v) means u -> v (transformed from u to v), then reverse edge (v, u) is v -> u, but condition is still u can be transformed to v. You can write BFS to find shortest path in following graph. The only thing which is probably interesting is how to find out for v all those u. Split into cases. You able to get from u to v either by +1 -> then just u = v — 1, or you able to get from u to v by u * 2, thus v should be even. Also it's shift to the left in binary representation, so highest bit is carried away, so highest bit in u can be any and its contribution is 32768/2. So if v is even, then u is either v / 2 or v / 2 + 32768 / 2.

Problem E description is absolute garbage. First of all why are the free cells denoted with '1' when the general convention in many of the problems we have solved is the other way around. Also it is not really clear from the description whether the whole component should be inside the interval or just need to have a cell in the interval. Believe it or not if you flip the 0 and 1 and consider the case where the whole component should be in the interval, the answer from the sample test checks out. Wasted like 2 hours trying to figure out what was going on.

how can be solve the getting zero (https://codeforces.com/contest/1661/problem/B) using bfs ?

This is Dfs solution 153314253 by demoralizer. Hope this would help

Can anyone please share a pure BFS implementation, i observed that the given constraint is 2^15 and as per my observation, we need to increase our number by 1 until we are at any power of two and then it is always optimal to double our number until we reach 2^15;

My Segment Tree + Lazy Propagation solution for Problem D : 184339258 Make a difference array of original array, and make its use for adding AP in a range of original array.

Please fix this bug. It will confuse who solve this problem for practice.

In the tutorial of 1661F,

It should be "maximum" since in the solution code, we left

`lf = mid`

(the lower bound of the binary search) when we found a valid value of c (`res.second <= w`

). In fact, the 'minimum value of c' will be 1 by the definition of g(c). Alternate brainless $$$O((n + q)\log{n})$$$ solution for problem E using a segment tree and processing queries offline:

We will iterate over the rightbound $$$r$$$ of queries, and maintain a segment tree $$$S$$$ such that when we are at some $$$r$$$, $$$l$$$'th element of $$$S$$$ will hold answer for range $$$[l, r]$$$.

There is only one important observation: If we consider a single column, there is only case wherein two free cells are not reachable from one another ($$$101$$$). So now, we're only really interested in the places wherein the two disconnected $$$1$$$'s in the $$$101$$$ case get connected. It is pretty obvious that this will happen when a $$$111$$$ is adjacent to a $$$101$$$. We will now maintain at each $$$r$$$, a variable $$$last$$$ which is the minimal $$$l$$$ such that all columns in $$$[l, r - 1]$$$ are $$$101$$$ ($$$last = 0$$$ if no such $$$l$$$).

Using this observation, let us solve this problem. Assume that we have built a valid segment tree till position $$$r - 1$$$, now we move to $$$r$$$, how do we fix the segment tree?

Firstly, each connected component in column $$$r$$$ which is not connected to any cell in the previous column, will increase answer by $$$1$$$ for $$$[l, r]$$$ for all $$$l$$$.

For each connected component in column $$$r$$$ which is connected to some cell in the previous column, this increases answer by $$$1$$$ only for the range $$$[r, r]$$$,

Lastly, if the current column is a $$$111$$$ column and previous column is a $$$101$$$ column, notice that it might be uniting two ones of a $$$101$$$ in range $$$[l, r]$$$ which would have been disjoint in $$$[l, r - 1]$$$. Now there are two cases:

Implementation: link

If someone wants to code this himself, this solution can also be used to solve this problem. (Although, There are a few more observations to be made, do try it out, its fun)

For the sake of completeness, here's the solution to problem C using the "gross formulae" described in the editorial. It is much less readable than binary search but it expresses a different approach to the problem which someone might find useful if they work on it the way I did.

Its logic is that you try and use the minimum amount of gap days. To achieve this, you first water all trees on even days until they reach the maximum height and then exchange some of those even days for odd days to balance the count of odd to even days.

Finally, you choose between the minimum result between heights max and max+1.

233365308