All ideas except problem C belong to MikeMirzayanov.

**Tutorial**

Tutorial is loading...

**Solution**

```
#include <bits/stdc++.h>
using namespace std;
// Problem A: one move flips the parity of a single element; to make all
// parities equal we must convert either every odd or every even element,
// so the answer is the smaller of the two counts.
int main() {
#ifdef _DEBUG
freopen("input.txt", "r", stdin);
// freopen("output.txt", "w", stdout);
#endif
    int n;
    cin >> n;
    int odd = 0, even = 0;
    for (int i = 0; i < n; ++i) {
        int v;
        cin >> v;
        if (v % 2 != 0)
            ++odd;
        else
            ++even;
    }
    cout << min(odd, even) << endl;
    return 0;
}
```

**Tutorial**

Tutorial is loading...

**Solution**

```
#include <bits/stdc++.h>
using namespace std;
#define forn(i, n) for (int i = 0; i < int(n); i++)
// Problem B: for each test case, count indices i such that some element to
// the right of i is strictly smaller than a[i]. We sweep from the right,
// maintaining the minimum of the suffix seen so far.
int main() {
    int t;
    cin >> t;
    while (t--) {
        int n;
        cin >> n;
        vector<int> a(n);
        for (auto &x : a)
            cin >> x;
        int bad = 0;
        int suffixMin = INT_MAX;   // min of a[i+1..n-1]
        for (int i = n - 1; i >= 0; --i) {
            if (a[i] > suffixMin)
                ++bad;
            suffixMin = min(suffixMin, a[i]);
        }
        cout << bad << endl;
    }
}
```

**Tutorial**

Tutorial is loading...

**Solution**

```
#include <bits/stdc++.h>
using namespace std;
#define forn(i, n) for (int i = 0; i < int(n); i++)
// Problem C: sum of the last digits of m, 2m, ..., k*m where k = floor(n/m).
// The last digits of consecutive multiples of m repeat with period 10, so
// we precompute one full period and combine full periods with a partial one.
int main() {
    int q;
    cin >> q;
    while (q--) {
        long long n, m;
        cin >> n >> m;
        long long k = n / m;               // how many multiples of m are <= n
        vector<int> last(10);              // last digits of m, 2m, ..., 10m
        for (int d = 0; d < 10; ++d)
            last[d] = ((d + 1) * m) % 10;
        long long full = accumulate(last.begin(), last.end(), 0LL);
        long long partial = 0;
        for (int d = 0; d < k % 10; ++d)   // leftover multiples past full periods
            partial += last[d];
        cout << partial + k / 10 * full << endl;
    }
}
```

1213D1 - Equalizing by Division (easy version)

**Tutorial**

Tutorial is loading...

**Solution**

```
#include <bits/stdc++.h>
using namespace std;
// Problem D1: pick a target value x reachable from at least k elements by
// repeated halving; the cost of an element is its number of halvings, and
// the answer is the minimum over x of the sum of the k cheapest costs.
//
// Improvement over the original: the candidate list contained every
// intermediate value of every element, with many duplicates, and each
// duplicate re-ran the full O(n log a) cost scan. Deduplicating the
// candidates makes each target value evaluated exactly once; output is
// unchanged.
int main() {
#ifdef _DEBUG
freopen("input.txt", "r", stdin);
// freopen("output.txt", "w", stdout);
#endif
    int n, k;
    cin >> n >> k;
    vector<int> a(n);
    for (int i = 0; i < n; ++i) {
        cin >> a[i];
    }
    // Every useful target is some a[i] divided by 2 zero or more times.
    vector<int> poss;
    for (int i = 0; i < n; ++i) {
        int x = a[i];
        while (x > 0) {
            poss.push_back(x);
            x /= 2;
        }
    }
    // Evaluate each distinct candidate only once.
    sort(poss.begin(), poss.end());
    poss.erase(unique(poss.begin(), poss.end()), poss.end());
    int ans = 1e9;
    for (auto res : poss) {
        // Halving cost for each element that can reach res exactly.
        vector<int> cnt;
        for (int i = 0; i < n; ++i) {
            int x = a[i];
            int cur = 0;
            while (x > res) {
                x /= 2;
                ++cur;
            }
            if (x == res) {
                cnt.push_back(cur);
            }
        }
        if (int(cnt.size()) < k) continue;
        // Take the k cheapest conversions.
        sort(cnt.begin(), cnt.end());
        ans = min(ans, accumulate(cnt.begin(), cnt.begin() + k, 0));
    }
    cout << ans << endl;
    return 0;
}
```

1213D2 - Equalizing by Division (hard version)

**Tutorial**

Tutorial is loading...

**Solution**

```
#include <bits/stdc++.h>
using namespace std;
// Problem D2: vals[x] collects, for every value x, the halving costs of all
// elements that can reach x. The answer is the minimum, over all x with at
// least k reachers, of the sum of the k smallest costs.
//
// Improvement over the original: it sorted vals[i] BEFORE checking
// vals[i].size() < k, paying for sorts of lists that are then skipped.
// Checking the size first avoids that wasted work; output is unchanged.
int main() {
#ifdef _DEBUG
freopen("input.txt", "r", stdin);
// freopen("output.txt", "w", stdout);
#endif
    int n, k;
    cin >> n >> k;
    vector<int> a(n);
    for (int i = 0; i < n; ++i) {
        cin >> a[i];
    }
    // Values are at most 2*10^5, so one bucket per possible target value.
    vector<vector<int>> vals(200 * 1000 + 11);
    for (int i = 0; i < n; ++i) {
        int x = a[i];
        int cur = 0;
        while (x > 0) {
            vals[x].push_back(cur);  // a[i] reaches x in cur halvings
            x /= 2;
            ++cur;
        }
    }
    int ans = 1e9;
    for (int i = 0; i <= 200 * 1000; ++i) {
        // Skip cheap targets first: no point sorting a bucket we cannot use.
        if (int(vals[i].size()) < k) continue;
        sort(vals[i].begin(), vals[i].end());
        ans = min(ans, accumulate(vals[i].begin(), vals[i].begin() + k, 0));
    }
    cout << ans << endl;
    return 0;
}
```

**Tutorial**

Tutorial is loading...

**Solution**

```
#include <bits/stdc++.h>
using namespace std;
// Problem E: for each of the 6 orderings (c1,c2,c3) of 'a','b','c' build two
// candidate strings of length 3n: the repetition "c1c2c3 c1c2c3 ..." and the
// blocked form "c1^n c2^n c3^n". One of these 12 strings always avoids both
// forbidden substrings s and t, so the first match is printed.
int main() {
#ifdef _DEBUG
freopen("input.txt", "r", stdin);
// freopen("output.txt", "w", stdout);
#endif
    int n;
    string s, t;
    cin >> n >> s >> t;
    string abc = "abc";
    vector<string> cand;
    do {
        string rep;
        for (int copy = 0; copy < n; ++copy)
            rep += abc;
        cand.push_back(rep);
        cand.push_back(string(n, abc[0]) + string(n, abc[1]) + string(n, abc[2]));
    } while (next_permutation(abc.begin(), abc.end()));
    for (const auto& str : cand) {
        if (str.find(s) == string::npos && str.find(t) == string::npos) {
            cout << "YES" << endl << str << endl;
            return 0;
        }
    }
    // Provably unreachable: some candidate always avoids both s and t.
    assert(false);
    cout << "NO" << endl;
    return 0;
}
```

**Tutorial**

Tutorial is loading...

**Solution**

```
#include <bits/stdc++.h>
using namespace std;
// Problem F: positions covered by a common prefix of both permutations must
// share one letter. We track, with two sets, which values currently appear
// in exactly one of the two prefixes; whenever both sets are empty the two
// prefixes cover the same positions and a new letter may start afterwards.
int main() {
#ifdef _DEBUG
freopen("input.txt", "r", stdin);
// freopen("output.txt", "w", stdout);
#endif
    int n, k;
    cin >> n >> k;
    vector<int> p1(n), p2(n);
    for (auto &x : p1) {
        cin >> x;
        --x;
    }
    for (auto &x : p2) {
        cin >> x;
        --x;
    }
    set<int> only1, only2;   // values seen in exactly one prefix so far
    vector<int> cuts;        // indices where both prefixes cover equal sets
    for (int i = 0; i < n; ++i) {
        if (only2.count(p1[i]))
            only2.erase(p1[i]);
        else
            only1.insert(p1[i]);
        if (only1.count(p2[i]))
            only1.erase(p2[i]);
        else
            only2.insert(p2[i]);
        if (only1.empty() && only2.empty())
            cuts.push_back(i);
    }
    if (int(cuts.size()) < k) {
        cout << "NO" << endl;
    } else {
        string s(n, ' ');
        int start = 0;
        for (int seg = 0; seg < int(cuts.size()); ++seg) {
            char letter = 'a' + min(seg, 25);  // clamp at 'z' past 26 segments
            for (int i = start; i <= cuts[seg]; ++i)
                s[p1[i]] = letter;
            start = cuts[seg] + 1;
        }
        cout << "YES" << endl << s << endl;
    }
    return 0;
}
```

**Tutorial**

Tutorial is loading...

**Solution**

```
#include <bits/stdc++.h>
using namespace std;
// DSU storage: p = parent links, rk = component sizes (union by size).
vector<int> p, rk;
// Find the root of v's component, with full path compression: every node
// visited on the way up is re-pointed directly at the root (same resulting
// parent array as the recursive version, but without recursion).
int getp(int v) {
    int root = v;
    while (root != p[root])
        root = p[root];
    while (v != root) {
        int next = p[v];
        p[v] = root;
        v = next;
    }
    return root;
}
long long res;  // running total of connected vertex pairs
// Number of unordered vertex pairs inside a component of cnt vertices:
// C(cnt, 2), computed in 64-bit to avoid overflow for large components.
long long get(int cnt) {
    return static_cast<long long>(cnt) * (cnt - 1) / 2;
}
// Union the components containing u and v (smaller hangs under larger),
// adjusting the global pair count res by the net change: pairs in the merged
// component minus pairs previously counted in each part.
void merge(int u, int v) {
    u = getp(u);
    v = getp(v);
    if (rk[u] < rk[v]) swap(u, v);  // ensure u is the larger root
    res += get(rk[u] + rk[v]) - get(rk[u]) - get(rk[v]);
    rk[u] += rk[v];
    p[v] = u;
}
// Problem G (offline): sort tree edges and queries by weight, sweep queries
// in increasing order while merging the endpoints of every edge whose weight
// does not exceed the current query; the DSU keeps the pair count in res.
int main() {
#ifdef _DEBUG
freopen("input.txt", "r", stdin);
// freopen("output.txt", "w", stdout);
#endif
    int n, m;
    cin >> n >> m;
    res = 0;
    p = rk = vector<int>(n, 1);
    iota(p.begin(), p.end(), 0);
    // Each edge stored as (weight, (u, v)) with zero-based endpoints so
    // sorting orders by weight first.
    vector<pair<int, pair<int, int>>> e(n - 1);
    for (auto &ed : e) {
        cin >> ed.second.first >> ed.second.second >> ed.first;
        --ed.second.first;
        --ed.second.second;
    }
    // Each query stored as (weight, original position) to restore order.
    vector<pair<int, int>> q(m);
    vector<long long> ans(m);
    for (int i = 0; i < m; ++i) {
        cin >> q[i].first;
        q[i].second = i;
    }
    sort(e.begin(), e.end());
    sort(q.begin(), q.end());
    int pos = 0;
    for (const auto &qu : q) {
        while (pos < n - 1 && e[pos].first <= qu.first) {
            merge(e[pos].second.first, e[pos].second.second);
            ++pos;
        }
        ans[qu.second] = res;
    }
    for (const auto &v : ans) {
        cout << v << " ";
    }
    cout << endl;
    return 0;
}
```

Thanks for the quick editorial.

Thank You for the editorial

I think calculating answer for each possible query is also an option in the given constraints in G.

A different approach for F, time complexity is O(n). Consider each permutation number as a node and create a directed graph by making an edge between adjacent nodes of the permutations (doesn't matter if from left to right or right to left). Some cycles may appear and it's obvious that all nodes in a cycle have to have the same letter in a string. Then just get strongly connected components and each can have a different letter. If there's less components than necessary K, print NO, otherwise do a topological sort on those components and give them letters correspondingly. Here's my code: https://codeforces.com/contest/1213/submission/59754658

You don't even need to do a topological sort of the vertexes.

Start with some character as 'a' and then iterate through one given permutation. Every time you visit a new component (previously not seen yet) you can increase current character value.

My code: 59772918

Actually the topological sort is unnecessary because in Kosaraju's, the components come in topological order :D

To be a lucky contestant-

`/home/your-self/competitive-programming/templates/building-meta-graph.cpp`

That piece of code — Submission

SCC is easier to compute in this problem. If you order nodes by $$$p$$$, the only back edges are created by $$$q$$$. You do not need DFS.

My code is dirty but here's my submission: https://codeforces.com/contest/1213/submission/59731555

the idea behind the official solution is same as the idea you have suggested. From the point of implementation it is much easier in official solutions.In your approach there are many redundant information such as actual bonds between nodes from one component.

I didn't say it's a different idea, it's just a different approach to it with a longer code but it has a better time complexity so that's why I wanted to share it..

I see — sorry for misunderstanding your initial intention; my apologies. :)

Not sure if this is helpful now, but the solutions above are too much of code. A simple approach is using two pointers with the complexity of O(n) (code) Sorry, if someone has already shared a similar approach.

Can you elaborate the idea behind your solution? i couldn't reach the full solution with 2 pointers, thanks in advance :)

[Deleted]

For problem G, what if the queries are online?

You can precalculate the answer for each $$$w_i$$$ (the weight of the $$$i$$$-th edge) as if it were a query. Then, for each real query, you can find the largest $$$w_i$$$ that is not greater than the query's weight using binary search.

Actually online/offline solution share the same idea, right? Calculating the answer by merging two sets in increasing order.

Yes. You make offline solution for interesting weights, and then easily answer for given queries in online.

can you please explain, how to start with the problem(I have got some vague idea but can't understand completely)what are the component around the edges, if we start from beginning and how to proceed then, with a small example, that will we very helpful.this problem seems very interesting to me.

Look at the first example input.

At first every node makes a single set. Now we calculate the answer for $$$weight \le 1$$$. Merge two nodes if there's an edge connected them, and its weight is less or equal $$$1$$$. In this example we have two edges $$$<1,2>$$$ and $$$<2,4>$$$. After merging, the node sets become $$$((1, 2, 4), (3), (5))$$$. Following is similar.

Now we look at how to update the answer when merging two sets. Let two set's size be $$$a$$$ and $$$b$$$. Merging them will add $$$a+b$$$ to the answer because, that before merging: $$$C(a, 2) + C(b, 2)$$$, and after merging: $$$C(a+b, 2)$$$. The difference is $$$a + b$$$, if you extend them.

Sorry for my poor English. You are welcome to ask again if you got confusing.

for weight<=1, the only two edges are <1,2> and <2,4>. But why are 3 and 5 also in the set: ((1,2,4),(3),(5)).

Also, how is difference between [ C(a,2)+C(b,2) ] and C(a+b,2) equal to a+b?

In fact, the difference in difficulty of this topic is still somewhat large. However, the idea of setting two difficulty points in D is very good. I hope that the difficulty of the problem will be smoother in the future.

Hi. Could someone please tell me why, in question D1, the number of possible candidates is O(nlogn) and not O(nlog(max(Ai)))?

max(Ai) <= maxn , I guessed.

But isn't n<=50, while (Ai) <= (10^5)?

Why D2 don't get tle ? We have 1e5 values and we are summing k values so k*1e5 ==> 1e5*1e5 ?

No, its not like that. Lets suppose you have k == n then only 0 or 1 will be able to satisfy the greater than condition. so the total complexity in that will be at max 8*10^5. Similarly you can check for other cases as well.

look at the solution We doing sorting before checking the condition

for (int i = 0; i <= 200 * 1000; ++i) { sort(vals[i].begin(), vals[i].end()); if (int(vals[i].size()) < k) continue; ans = min(ans, accumulate(vals[i].begin(), vals[i].begin() + k, 0)); }

We have at most $$$n=2*10^5$$$ numbers. Each number will contribute to $$$O(\log_{2}(2*10^5))$$$ arrays, when we divide it by $$$2$$$ until $$$0$$$. So, at worst case we have $$$O(n*\log_{2}(2*10^5))$$$ numbers in the arrays in total. So we never have to sort, and sum more numbers than that, no matter what $$$k$$$ is.

For the

problem D1, I don't understand the statement:We can see that there is always O(nlogn) possible candidates because all values x are among all possible values of ⌊ai2m⌋ for some m from 0 to 18. So we need to check each candidate separately and try to update the answer with it.Also, how are we selecting the value for x? is it in the sequence 2,4,8... or do we calculate x in some way and then use it to check the elements in array which can be reduced to it.

Thanks. :)

I can tell you the idea. Basically it is a brute force, we try to check every possible x, so we loop from zero to 2e5 and check how many moves can we make to make x and then take the minimum

I saw the solution code after reading your reply. It makes sense to me now. Thank you!

Our goal here is to divide some numbers from this array by 2 in order to obtain $$$k$$$ equal elements. Let's say that the required number is $$$x$$$, this number is equal to one of the numbers in the final array; since any number in the final array is the result of dividing it by $$$2^p_i (0 <= p_i)$$$, where $$$p_i$$$ is the number of time we divided the $$$i-th$$$ number by 2, then $$$x$$$ is of the form $$$\lfloor a_i/2^p_i \rfloor$$$. Now, we know that there are $$$n$$$ numbers and in worst case for input size and values the maximum number in the array is equal to $$$n$$$, so each number can be divide by at most $$$\lfloor log(n) \rfloor + 1$$$ and we have $$$n$$$ numbers, so the number of possible choices for $$$x$$$ is $$$O(n*log(n))$$$.

It is known that there are n elements in the array. But why is the value of largest element n in worst case.

What if for example we had 3 elements. Thus n=3 but if the the largest element in it is 445 (which is much greater than 3).

I said the worst case for

input sizeand values.If we are already denoting n as the number of elements in the array, how can we also use n to denote the largest value of array?

Here is what I am understanding:

For input size

n. Let's have the largest element in array denoted bym.we can divide each number by at mostIn the worst case,⌊log(m)⌋+1and this is to be done for n elements. Thus, worst case time complexity:O(n*log(m))Yeah, that's right, but for the sake of simplicity we are just using $$$n$$$.

OK. Thanks

Can anyone tell me what's wrong in my code for G. It is failing on the 4th test case. I have done the exact same thing as the editorial suggested.

link

Array fin_ans should be long long.

Thank you so much :)

Can some plz explain to me the approach for problem-C? Thanks!!

For the sum of the unit's digit, the whole divisor isn't important, only the units place is useful. All digits(0 to 9) tend to follow a pattern when multiplied with other digits(0 to 9). Kind of cycle which they keep on repeating.For example...

1) all multiples of zero will end with 0.

2) all multiples of an odd number other than 5 can end with any digit 0 to 9.

3) all even number end with 2,4,6,8,0.

4) all multiple of 5 ends with either 0 or 5.

Now try comprehending this... https://codeforces.com/contest/1213/submission/59745010

Notice that the last digit pattern repeats after 10*m for m. So we can calculate last-digit sum for m,2*m,..10*m(call it s). This will repeat n/(10*m) values. Then we can calculate the ans as s*(n/(10*m) + digit sum for (n — [n/(10*m)]*10*m).

In G you can also just do $$$res := res + s_{1} s_{2}$$$, since exactly $$$s_{1} s_{2}$$$ new paths are formed by adding the new edge. Indeed,

$$$\binom{s_{1} + s_{2}}{2} - \binom{s_{1}}{2} - \binom{s_{2}}{2} = \frac{(s_{1} + s_{2})(s_{1} + s_{2} - 1) - s_{1}(s_{1} - 1) - s_{2}(s_{2} - 1)}{2} = \frac{s_{1}^{2} - s_{1} + s_{2}^{2} - s_{2} + 2s_{1}s_{2} - s_{1}^{2} + s_{1} - s_{2}^{2} + s_{2}}{2} = s_{1} s_{2}$$$.

Hmm, this seems nice and there is very intuitive way to understand this formula. In fact, you have some paths in the first component and some paths in the second component. And all paths you need to add are the paths between the vertices of the first component and the vertices of the second component. Obviously, there is exactly $$$s_1 s_2$$$ such paths. Thanks for providing this formula, I didn't thought about that :)

Vovuh Can you explain why my approach is wrong for D2

I run a loop from j = 1 to j = 200005 considering each j as an optimal value which will be our k equal elements. Now for each j I calculate the minimum number of steps required to get k occurences of j.

For that I have sorted the vector already and follow this startegy

Suppose I want to make x as my optimal value , then in 1 step i can get x from elements which are in the range 2*x to 2*(x+1)-1 in 2 steps i can get x from elements which are in range (4*x) to 4*(x+1)-1 in 3 steps i can get x from elements which are in range (8*x) to 8*(x+1)-1 .... and so on.

to get the number of elements in a range I use upper_bound and lower_bound on my sorted array.

Why is this approach failing for test case 4?

code -> code

Never mind found my mistake

Can G be solved without DSU?

I think even if such approaches exist, they will get TLE

[Deleted]

All people who managed to solve E problem, I just want to know what struck you? what idea came to your mind. Do you think you solved it because you have solved similar problems in the past... if so do share them?

I am not sure that I have seen any similar problems before, but almost as soon as I looked at the problem it seemed to me likely that the result would simply be a pattern repeated n times, so one only needed to look at all the different cases for s and t. I then realised one only had to look at two values for s ('aa' and 'ab') since every other value can be transformed into one of these by swapping letter names.

This then gave me a number of different cases for the relationships between the characters of s and t, each of which had a reasonably obvious solution.

In the end my first solution failed the tests, but it was then quite easy to write a small test for my code going through all possible values of s and t to work out what case I had missed (maybe I should have done this before submitting the first time!).

cool, makes sense!

I think there is an O(N) solution to F (sadly not completed during the contest). See https://codeforces.com/contest/1213/submission/59767381.

My code first inverts the q permutation so that it maps s indexes to q values rather than the reverse; then by working backwards through the p values I can find, for each index of s, the minimum q value corresponding to any later p value.

Finally, it fill in s in p value order keeping track of the maximum q value associated with the letters of s filled in so far. It starts with filling in with 'a's. Whenever the maximum value it has seen is less than the minimum value q value associated with later p values it can move on to the next letter. This continues until it has k different letters, at which point it simply fills in the remaining characters of the string with the final letter.

I think there's an O(n) solution too, i was thinking of building an oriented graph of n nodes and for each permutation take any two neighbour values and add an edge between first and second. For example if 123 and 132 are the permutations you'll have edges 1->2, 2->3, 1->3 and 3->2. Then notice a strongly connected component will have the same letter throughout and the problem is reduced to assigning letters to each connected component

Here's a simple linear implementation without using any graph ideas.

https://codeforces.com/contest/1213/submission/59773171

Here is in my opinion the simplest O(n) solution, which uses a bitset.

59775080

please, explain the idea.

It's probably the same as in the editorial, but checks whether the two sets are equal by maintaining the size of their symmetric difference. Here's my code based on the same idea: 59794369.

The symmetric difference is just the number of positions where the two sets differ, therefore it is zero exactly when the sets are equal. Maintaining which positions have appeared in exactly one permutation and the symmetric difference makes updates easy. Whenever the symmetric difference is zero, as in the editorial you can move to the next character.

I have been trying to solve at least problem 'A' throughout last August, but I have always failed to solve even 'A'. Maybe I am not even considered a 'newbie' in problem solving.

A part of overcoming newbie region is to just stop underestimating yourself and overestimating problems. :)

Another way to look at D2, we consider every value from 1 to MaxVal in array to be the optimal value and choose the best ans.

1. Mark the frequencies of all elements in an array.

2. Consider all values from 1 to MaxVal.

3. For value i we do a BFS on a tree to get the cost.

The tree here is a binary tree with root node as 1 and the child of node i are 2i and 2i+1.

A node at same level as Source node i of BFS will take 0 cost to convert to i, nodes at 1 level under will cost 1 to turn into i.

So the BFS will run till: either we have found k elements or it is not possible to make k elements equal to i. Time Complexity : MaxVal log (MaxVal) https://codeforces.com/contest/1213/submission/59750061

From what I can tell that BFS is actually $$$O(m \log^2 m)$$$ (where $$$m = max(a)$$$), since the number of nodes to search at each "level" grows by one each time. That's also roughly the solution I implemented during contest time, except that I noticed that the numbers of each level of the tree are consecutive, so I used a BIT for the queries: #59749502

While looking through other solutions I found one that was quite interesting, but I don't remember who wrote it:

Asymptotics $$$O(n (\log n + \log m) + m)$$$

Sample code I wrote after contest time based on this idea: #59761739 (instead of two arrays, I just use one array here with the two variables as a tuple)

Hi, I dont understand why the complexity will be mlog2m.

here is my analysis:

a bfs on node 1 will be O(m).

a bfs on node 2 will be O(m/2) and on node 3 will also be O(m/2). a bfs on node 4,5,6,7 will O(m/4).

from this I conclude that for all nodes at a given level BFS cost summed is O(m).

also number of levels in the tree will be logm.

So i feel overall m*logm should be the time complexity, correct me if I am wrong.

You may be right; I didn't consider that searching the higher numbers might be shorter in a way that is material to the asymptotic analysis. It makes me curious as to what the best asymptotic estimate for my BIT variation would be...

in problem E it's for permutations of c_1 ,c_2,c_3 it's enough to only consider abc & acb because all others permutations will contain the same sequence of repeating letters by just rotating these two permutations so we can only narrow down our search space instead of 12 to be 8

Test 2 ac ab Answer bbccaa

In problem E Tutuorial, "Then let's add the following two candidates to the answer: "c_1 c_2 c_3 c_1 c_2 c_3 ... c_1 c_2 c_3" (the string consisting of n copies of "c_1 c_2 c_3") and "c_1 ... c_1 c_2 ... c_2 c_3 ... c_3" (exactly n copies of c1 then exactly n copies of c2 and exactly n copies of c3). Then the answer will be among these 12 strings and we can check each of them naively."

how can you say only this permutations will be enough? and why we are just concatanating each permutation of 'abc' n times ?

if you write all possible s & t and for C_1,C_2,C_3 you will find that using abc and acb will not pass if either of s or t is (ab,bc,ca ) or (ac ,cb,ba) respectively for other permutations like for example (bac) it will be the same as above it will not pass of s or t is (ba or ac or cb) the same as acb because it's only the rotation of acb so now we have for the permutation of acb only 2 options (the other 4 are redundant) then you consider using C_1C_1C_1 C_2C_2C_2 C_3C_3C_3 each C_i n times and their permutation so here u will get 6 more options

I think I got it:

here I print out all possible s and t values, and then using rotations like: ab and bb is the same as ac cc, ba aa, and so on, I minimize cases count (10 total)

Then I manually went through all the combinations and found out that combinations of c1c2c3*3 and c1*3+c2*3+c3*3 is enough to build a valid string

seems that 'NO' never can happen (it would be obvious if I notice assert(false) in example earlier)

You can read my analysis here)

Can someone please help me with understanding the time complexity of "1213D2 — Equalizing by Division (hard version)". I understood the time taken to prepare the array

cnt in O(n log n). After that, the process of finding the sum of the k smallest values for all values of x should take another O(n * n log n), since for each x [0 to 2*10^5] we need to sort the array of values (the count of these can again be up to n, i.e. up to 2*10^5, taking O(n log n)) to obtain the smallest k values. Please let me know where I am getting it wrong? Thank you. — Reply: Let's save our numbers as pairs (val, iteration when we get it). Then we sort our pairs, which takes n log n. Then, for the first k equal vals, the sum of their iterations is the minimum needed to reach that val.

By nlogn, you mean per val. Correct? Eg: val of 0 will be present for each value of x and therefore will take nlogn for finding the smallest k iterations. Similarly for all the other vals (0 to 2*10^5)

It can't be true that for each of MAX=2*10^5 values size of vector will be 2*10^5 because we have total length of all vectors <= MAX * 18 (each x can be used not more than 18 times). so if we have 18 vectors of length MAX total time of sorting them will be 18*MAX*log(MAX) = A if we have 36 vectors of length MAX/2 total time of sorting them will be 36*MAX/2*log(MAX/2) = 18*MAX*log(MAX/2) <= A so worth case is 18*MAX*log(MAX)

Is this the other correct way to solve problem A?59723491

Can someone explain that?

It's correct. Answer for A, is min from odd point and even points. This solution find answer, becouse it looks "what is better odd or even point?"

Oh,I see.It's the different way:-)

I solved B with Monotone stack :)

Well.. now I know how to solve problem F. but.. WHY to do so??(#｀-_ゝ-)

I had tried to understand the tutorial's solution for hours but failed.. Could someone give me a explanation? thx ( •̀ ω •́ )✧

What is wrong with my solution for D2 . My solution was also O(nlogn) but got TLE while submitting... Can someone point out the mistake my submission — https://codeforces.com/contest/1213/submission/59748079

AFAIK worst case of Arrays.sort() is $$$O(n^2)$$$ and not $$$O(n logn)$$$

Converting it to merge sort will make difference?

Maybe because I think there is no other way that your code gets TLE

ok yes, it works... poor me :p got a lesson — never believe on quick sort

Java's sort function is sometimes quite unreliable. So try to avoid it. :)

You can read this. https://letsdocp.wordpress.com/2018/06/22/java-in-competitive-programming/

that's hella strange tbh, in C++ the time complexity of std::sort() is guaranteed to be O(n logn)

Yeah, I kinda wish Java would switch to something like introsort for primitive arrays. This issue also affects Kotlin environments when targeting the JVM (which is currently the most common flavor supported).

Another possible workaround is to shuffle the array before sorting, but... Java's Collection.shuffle() doesn't support primitive arrays (or regular arrays for that matter), forcing you to implement Fisher-Yates yourself for each type of primitive array you need to work with.

In problem C, my approach was similar, but got WA in test-case 4. Can someone please check what is the error:

Well, you check if (k > 0) where k = n / m. So, even if k = 10, 20, 30... you always add arr[0-1] to your answer, which is an undefined value.

Before the condition you can write k = k — k/10; Here, you get reminder of division k by 10.

In G, what are the component of edges and how are we merging them, any easy way to understand it? an example would be helpful.

very good

Missed expert by 8 points :(

Is it possible to solve G for online queries? Did anyone solve?

I think there is a better solution for F.Time complexity:$$$O(n)$$$

We can build a graph as follows: for each $$$s[p_{i}]\le s[p_{i+1}]$$$,$$$s[q_{i}]\le s[q_{i+1}]$$$, we can connect $$$p_i$$$ and $$$p_{i+1}$$$ . Similarly we connect $$$q_i$$$ and $$$q_{i+1}$$$ .

It is easy to prove that we should set the same letter in those positions which are the points on one cycle on the graph that we built.Then we can use

`Tarjan`

to "Shrink point", thus we can get a`DAG`

.We can think of the DAG as

`a layered graph`

. Because we have to use at least $$$k$$$ letters, if the graph doesn't have at least $$$k$$$ layers, it's impossible to construct such an answer.Then we give each layer one letter in turn. If there are more than 26 layers, just set those layers as letter 'z', it's not difficult to prove it.

Time complexity:$$$O(n)$$$

code 59775323

// I used map to remove duplicate edges, this is not necessary.

Can someone give small test case like TC-20 for problem F? Nvm was a silly mistake.

Duh, in problem C i failed in test 6 when was using the python, and the approach seems to be fine, but I didn't have time to reimplement my solution in C++. Today when I reimplemented it with C++ it passed all tests -------------_________________------------------

btw commented python total calculation is my original formula and it works in cpp, even with algorithm from author python solution didn't work

This is a total disaster...

I suspect that

`math.floor(n/m)`

is the culprit, and changing it to`n//m`

may solve the issue. Internally,

`math.floor`

should use floating point arithmetic with double precision, leading to inexact results when used to calculate large integers.

Thanks a lot, let me check that

Exactly, that was the issue. Good experience for me, thanks

59835828

Why in D2 do we sort and then check the size? It would be more efficient to check the size first and then sort. Please correct me if I am mistaken.

You are right.

Yes you are right, the author might have made a blunder there. But all's right until we are within the time limit. Another mod to the solution can be sorting a[n] just after input. View my solution for the problem: Problem 1213D2

can anyone please tell me why I am getting TLE at test case number 6 for problem G. I am using DSU; here is my code link

You are doing the merge operation without even looking at the sizes of the two sets, and that is slow.

Use operation merge of two sets by size/rank using the size array.

thanks a lot

Use path compression, here is your modified code (i only change the root() function): 59810426

thanks a lot for the help, btw i thought even without path compression it only takes log(log(N)) time to find root of a node so it may not cause any problem.

For problem E, how would someone come up with this kind of solution and be so sure that all the answers lie in these 12 patterns? It's very difficult to think this way. Please help with your approach.

I have written an analysis to arrive at the solution. You can read it here)

My code(problem d2) work in 77 ms. 59748709

Anyone please tell me... What is the intuition behind the editorial of Problem F?

In E how one can prove that, by following algorithm we won't miss any possible permutation? can somebody prove that by that construction it is impossible to miss any permutation of 3n numbers(also considering constraints in the problem)

You can read this comment

thanks

For problem E. I think the intuition behind it are as follows:

First, we have to consider two types of test cases for this problem. These are:

I.) When both of the strings s and t have two distinct characters each. Example: (ab, bc).. etc.

II.) When one or both strings contain only one distinct character. Example: (bb, ca) and (aa, bb)

Suppose that we have two input strings of type I, and for simplicity we use n >= 1, say n = 2. Let's see the invalid strings for each permutation that we will generate. Let's call this group of strings GROUP A.

abcabc — (ab, bc, ca) bcabca — (ab, bc, ca) cabcab — (ab, bc, ca)

acbacb — (ac, ba, cb) bacbac — (ac, ba, cb) cbacba — (ac, ba, cb)

We have 6 potential answers. And each three share the same invalid sub string, unfortunately all of these combinations will fail if we have a test case where the strings are both invalid for the first and last three of this generated answers. For example, (ac, ab) or (ba, ca) won't work for any of these 6.

The remedy for this is to group each of the distinct n characters together and make a permutation out of it. Let's call this group of string GROUP B. Again, for n = 2, we get:

aabbcc — (ab, bc) aaccbb — (ac, cb) bbaacc — (ba, ac) bbccaa — (bc, ca) ccbbaa — (cb, ba) ccaabb — (ca, ab)

This now reduces the invalid sub strings for each permutation to two. This solves test cases of type I.

For type II test cases. We can simply select a sub string from GROUP A. Because we only care about at most one (or none) strings with two distinct characters, we just select a string configuration where the strings s and t aren't invalid.

In conclusion, strings of GROUP A are used to generate potential solutions for type II test cases, and strings of GROUP B are used to generate potential solutions for type I test cases.

For C I used similar approach as in editorial but getting memory limit exceeded. Can anyone of you help me https://codeforces.com/contest/1213/submission/59756440

## Proof of Correctness of E

If you want the graphs to embedded within the text, you can read this link

Let us draw a graph on 3 vertices, namely

`a`

,`b`

, and`c`

. There is an edge from`i`

to`j`

if it is permissible to go from`i`

to`j`

. Notice that there can be self loops. Initially, the graph looks like this.Initial Graph

Now, as soon as we receive a query, we need to remove the corresponding edge in the graph. For example if the string

`s`

is`ab`

, it means we won't be able to go from`a`

to`b`

as it is now forbidden. Similarly, if the string`t`

is`cc`

, it means that we need to remove the self loop of`c`

. It is clear that we need to remove exactly 2 edges from the graph, and after that if we can traverse the graph such that all the vertices are visited exactly`n`

times, and at the same time, ensuring that we travel only on permissible edges, then we can get our answer.Case 1--> We remove 2 self loops.Graph of Case 1

Observe that we can travel the graph in the path

`abc abc ...`

.Case 2--> We remove 1 self loop and 1 normal edge.Graph of Case 2

WLOG, assume that the directed edge from

`a`

to`b`

has been removed. This means that if we start at`a`

, we can't directly go to`b`

. Fine, Let us start from the vertex which isn't reachable in one move, i.e`b`

. We traverse the graph in the manner`bac bac ...`

.Case 3--> We don't remove any self loops. For each edge`i --> j`

which is removed, we will call`i`

as being marked.Sub Case 1⇒ Only 1 vertex is markedGraph of subcase 1

WLOG, Suppose, only the vertex

`a`

is marked, this means that both outgoing edges from`a`

is removed. We can traverse the graph in the order`ccc... bbb.... aaa...`

Sub Case 2⇒Graph of sub case 2

2 Vertices are marked. Suppose they are

`a`

and`b`

. If we cutoff both the link of`a`

and`b`

, we can traverse the graph in the manner`acb bca acb ....`

. So now, the only case remaining is the one where the edge`a ---> b`

is cutoff, and the edge`b --> c`

is cutoff. Notice that we can still traverse the graph in the manner`acb acb ...`

Very helpful observation, thank you !

Superb, thanks a lot.

Great observation

In Problem D1, my approach was similar to the editorial. I sorted the array , now we traverse it and for each element we see that with the next(higher elements after it as array is sorted) elements can give this a[i] element or not. If yes, we add the number of divisions("cost" variable) needed. Repeat till we get k similar elements.(if possible) Repeat this process for all elements of array.

Now we have min divisions needed to form a number. k times. which already existed in original array also. in "ans" variable. Now I checked minimum divisions needed to make k Zero's. Printed minimum of them both.

It Fails on 5th case.Please Help! 59830286

please look at my code : https://codeforces.com/contest/1213/submission/59874621 my approach for D1 : but unaccepted at 5th case 1. store the elements with their frequencies in map mp 2. store the element with value = 1 in map mp1 3. find the max frequency -- if(maxFreq >= k) return 0; above 3 can be done in one loop of O(n) 4. change the frequencies from original to (k-original) 5. iterate over the array 6. store a[i]/2 in New 7. while(New>0) -- if New = present in mp -- decrease it's frequency by 1 -- increase value for New in mp1 by 1 -- & when New's freq = 0 --> cal no of a[i] require using mp2's value for New & compare with Min -- now delete New from both maps bcoz we want least k I can't figure out what am i doing wrong help please -- it means a lot

Why the E problem solve is that? Why is the permutations of the string "abc" ? help me. thanks

I think that's the most efficient way --> bcoz total permutation will be 12 of abc --> TC = O(12)

In E-Two Small String it is mentioned that "If there are multiple answers, you can print any of them." Then why I am getting WA in test case 4. here is my submission -https://codeforces.com/contest/1213/submission/59881371

I was unable to solve D1 and D2 problems during the contest period but was able to upsolve it later, could anyone tell the time complexity of the code:

I'm confused whether it will be

`O(N) {O(4*N)}`

or`O(N*logN)`

or something else.

For problem F, what is the approach or intuition behind Vovuh's solution in the editorial? Although I am able to understand what is done while inserting/removing from the set until they are equal, I am not able to understand why this approach is used and what the mindset would be for similar future problems.

As, both permutation leads to same string when sorted.. therefore the segment [l,r] in string1 and string2 should be a permutation of each other ( and hence should have same character ) therefore we are finding such segments & assigning them the same character.

excellent editorial

This O(n*n*logn) solution ( https://codeforces.com/contest/1213/submission/60268220 ) takes ~ 217 ms . However an identical but faster O(n * n) solution (https://codeforces.com/contest/1213/submission/60267572) takes about ~1800 ms and also more memory. Please explain what's wrong? Is the complexity analysis wrong or what?

Thanks for this.

If I am not mistaken, I found a solution for D2 with O(N * log2(max(a[i]))) complexity. Sort of O(N * 20) ~ O(N). That is the same complexity with such limits as O(N * log2(N)), but maybe it would be interesting for someone. I've used binary prefix tree — http://codeforces.com/contest/1213/submission/60412397

In Question E (Two small strings), I am getting wrong answer on fourth test case

1 ab cb

judge's answer is NO but I think we can construct res as "bac".

Please help.

actually the judge's answer is YES

I am getting wrong answer on this, I can show you also.

https://codeforces.com/contest/1213/submission/60527758

It shows you: your output is NO and jury's output is YES bac

D1/D2:There exist

`O(max_value)`

solution for this problem, which is`O(n)`

by the problem statement. We create

`LOGN`

arrays of sizes`MAXN`

,`MAXN/2`

,`MAXN/4`

, ...,`1`

. Array number`j`

will store (under index`i`

) how many numbers are there that require exactly`j`

divisions to get to value`i`

.We see that

`Array[j][i] = Array[j - 1][2 * i] + Array[j - 1][2 * i + 1]`

, and`Array[0][x]`

= number of occurrences of`x`

in the input. Now, for each candidate value

`x`

in range`[1 .. MAXN]`

we only need to greedily take first`k`

cheapest elements that leads to`x`

. We do this by taking elements group by group until we get total of`k`

elements. This way, we compute the value of each array element only once and retrieve each value only once. Note that the total size of the arrays is

`MAXN + MAXN/2 + MAXN/4 + ... + 1 <= MAXN * (1 + 1/2 + 1/4 + ...) = 2 * MAXN = O(MAXN)`

.My C++ code for this solution: submission/60682312. It takes 62 ms to execute.

will anyone please tell me where in the question Chips moving it is mention that only chips having odd values are supposed to move. In the question it was mentioned that 2 steps jump for 'i' is free and 1 step jump costs 1 coin.

I can't solve problem D2 (D1 too) in the contest because of my poor complexity analysis. At the time, I thought the complexity was $$$O(n^2 log(n))$$$ and I refused to solve it. Now, I think the complexity exactly is

$$$T = x_1\log(x_1) + x_2\log(x_2) + \dots + x_n\log(x_n)$$$ with $$$x_1 + x_2 + \dots + x_n = n$$$.

I want to ask you how to prove: $$$T \le n \log n \cdot \log(n \log n)$$$

I solved D2 using dynamic programming. I created a 2D array a[2·10^5][20], where a[i][j] represents the number of elements which become i after dividing them j times by 2.

link to my solution: https://codeforces.com/contest/1213/submission/62255328

Another way to think of problem D (or D2 specifically) is to put the numbers [1,2,...,aMax] in a complete binary tree where node i has children 2*i and 2*i+1.

Then the process of equalizing k numbers is equivalent to: choose a subtree of size at least k, with root, say, x; denote Level 0 of the subtree is the number of occurrences of x; Level 1 of the subtree is the number of occurrences of children of x; Level 2 of the subtree is the number of occurrences of grandchildren of x; and so on.

To get the cost of equalizing this subtree, you (greedily) sum 0*(#x's)+1*(#children of x)+2*(#grandchildren of x)+... until you have added up k distances.

It is possible to calculate subtree sizes in linear time, and to build an N-by-log(aMax) matrix descendants[][] where descendants[x][n] = # of generation-n children of x, in O(N log aMax) time.

I have a solution for problem D that differs from the author's. However, I cannot make it work despite many tries and don't know where I am wrong. (I am not good at English...) First of all, use a bucket to store the array that is provided (and also store the original array and sort it). Then enumerate the array

a, and try multiplying by 2 (at most 20 times). When we observe a number, we have an interval of numbers that reduce to the same value as the number we observe. For example, when we observe 5 ($$$101$$$ in binary) and multiply by 2 (here, three times), the interval is $$$[40,47]$$$ ($$$101000$$$, $$$101111$$$ in binary), so by just querying the prefix sum we can solve it in $$$O(n \log n)$$$. Am I wrong, or is what I said clear?

1213C - Book Reading: For 1213C — Book Reading, I didn't quite understand the mathematical explanation. Can anyone please explain the mathematics behind the solution for 1213C with further elaboration? I'd be grateful.