Tutorial is loading...
solution
#include<bits/stdc++.h>
#define inf 0x3f3f3f3f
#define maxm 100005
#define maxn 2005
#define PII pair<int, int>
#define fi first
#define se second
typedef long long ll;
typedef unsigned long long ull;
using namespace std;
const double pi = acos(-1.0);  // pi; "acos(1)" (= 0) lost the minus sign in extraction
const int mod = 998244353;
const double eps = 1e-10;      // tolerance; "1e10" lost the minus sign in extraction
const int N = 1e2 + 10;
int n;
int a[N];
// Problem A (Mocha and Math): the minimal achievable maximum is the AND of the
// whole array — a bit survives in the answer only if it is set in every element.
int main() {
    ios_base::sync_with_stdio(false);cin.tie(0);cout.tie(0);
    int t;
    cin >> t;
    while (t--) {              // restored "--" lost in extraction ("while(t)" never terminates)
        cin >> n;
        for (int i = 1; i <= n; i++) cin >> a[i];
        int res = a[1];
        for (int i = 2; i <= n; i++) res &= a[i];
        cout << res << endl;
    }
    return 0;
}
Tutorial is loading...
solution
#include <cstdio>
using namespace std;
const int N = 105;
int t, n, cnt;
char s[N];   // 1-indexed string of 'R', 'B', '?'

// Problem B: replace every '?' so that the number of adjacent equal colours is
// minimal. Strategy: propagate alternating colours outwards from fixed letters,
// left-to-right then right-to-left; an all-'?' string starts from an arbitrary 'R'.
int main()
{
    scanf("%d", &t);
    while (t--)                          // restored "--" lost in extraction
    {
        cnt = 0;
        scanf("%d", &n);
        scanf("%s", s + 1);
        for (int i = 1; i <= n; i++)
            cnt += (s[i] != '?');
        if (!cnt)                        // no fixed letter: any alternating colouring works
            s[1] = 'R';
        // left-to-right: colour each '?' opposite to its already-coloured left neighbour
        for (int i = 2; i <= n; i++)
            if (s[i] == '?' && s[i-1] != '?')
                s[i] = s[i-1] ^ ('B' ^ 'R');   // XOR with ('B'^'R') flips 'B' <-> 'R'
        // right-to-left: handle the '?' prefix before the first fixed letter
        for (int i = n-1; i; i--)
            if (s[i] == '?' && s[i+1] != '?')
                s[i] = s[i+1] ^ ('B' ^ 'R');
        printf("%s\n", s + 1);
    }
    return 0;
}
Tutorial is loading...
solution
#include <bits/stdc++.h>
#define maxn 100086
using namespace std;
int t, n;
int a[maxn];
// Problem C: a[i] == 0 means vertex i beats vertex n+1, a[i] == 1 means n+1 beats i.
// Vertices 1..n form a transitive order, so a Hamiltonian path is 1,2,...,n with
// vertex n+1 inserted at the first place where the edge directions allow it.
void solve(){
    scanf("%d", &n);
    for(int i = 1;i <= n;i++) scanf("%d", &a[i]);
    if(a[1]){                        // n+1 beats vertex 1: put n+1 first
        printf("%d ", n + 1);
        for(int i = 1;i <= n;i++) printf("%d ", i);
        return;
    }
    for(int i = 1;i < n;i++){
        if(!a[i] && a[i + 1]){       // i beats n+1 and n+1 beats i+1: insert between them
            for(int j = 1;j <= i;j++) printf("%d ", j);
            printf("%d ", n + 1);
            for(int j = i + 1;j <= n;j++) printf("%d ", j);
            return;
        }
    }
    for(int i = 1;i <= n;i++) printf("%d ", i);   // every vertex beats n+1: put it last
    printf("%d ", n + 1);
}
int main(){
    scanf("%d", &t);
    while(t--) solve(), puts("");    // restored "--" lost in extraction
}
Tutorial is loading...
solution
#include<bits/stdc++.h>
#define maxn 2005
#define fi first
#define se second
#define PII pair<int, int>
using namespace std;
typedef long long ll;
const ll mod = 10007;
// Fast integer reader; restored '||', '-' and "f = -1" lost in extraction.
inline ll read(){
    ll x = 0, f = 1;char ch = getchar();
    while(ch > '9' || ch < '0'){if(ch == '-') f = -1;ch = getchar();}
    while(ch >= '0' && ch <= '9'){x = x * 10 + ch - '0';ch = getchar();}
    return x * f;
}
int n, m1, m2, f[2][maxn];   // two DSU parent arrays, one per forest
// DSU find with path compression; id selects the forest (0 or 1).
int getf(int id, int x){return x == f[id][x] ? x : f[id][x] = getf(id, f[id][x]);}
vector<PII> ans;
// Problem D1: greedily add edge (i, j) whenever i and j lie in different
// components in BOTH forests; any maximal set of such edges is optimal
// (each added edge reduces the component count of both forests by one).
int main() {
    n = read(), m1 = read(), m2 = read();
    for(int i = 1; i <= n; i++)
        f[0][i] = f[1][i] = i;
    for(int i = 1; i <= m1; i++){
        int u = read(), v = read();
        int fu = getf(0, u), fv = getf(0, v);
        f[0][fu] = fv;
    }
    for(int i = 1; i <= m2; i++){
        int u = read(), v = read();
        int fu = getf(1, u), fv = getf(1, v);
        f[1][fu] = fv;
    }
    for(int i = 1; i <= n; i++){
        for(int j = i + 1; j <= n; j++){
            if(getf(0, i) != getf(0, j) && getf(1, i) != getf(1, j)){
                ans.push_back({i, j});
                f[0][getf(0, i)] = getf(0, j);
                f[1][getf(1, i)] = getf(1, j);
            }
        }
    }
    printf("%d\n", (int)ans.size());   // cast: size() is size_t, format is %d
    for(auto i: ans) printf("%d %d\n", i.fi, i.se);
    return 0;
}
Tutorial is loading...
solution
#include<bits/stdc++.h>
using namespace std;
#define N 100010
int fa1[N],fa2[N];            // DSU parents for the two forests
set<pair<int,int> > rows;     // (row size, root in forest 1) — drives small-to-large merging
set<int> row[N],col[N];       // sparse "matrix": row[p1] = set of forest-2 roots, col[p2] = set of forest-1 roots
set<int>::iterator it;
map<int,int> mp[N];           // mp[p1][p2] = a representative vertex lying in components (p1, p2)
pair<int,int> Ans[N];

// DSU find with path compression.
int getfa(int *fa,int x){
    if (x==fa[x]){
        return x;
    }
    return fa[x]=getfa(fa,fa[x]);
}

// Merge matrix row y into row x (caller ensures |row[y]| <= |row[x]|).
void Merge_row(int x,int y){
    for (it=row[y].begin();it!=row[y].end();it++){
        mp[x][*it]=mp[y][*it];
        row[x].insert(*it);
        col[*it].erase(y);
        col[*it].insert(x);
    }
}

// Merge matrix column y into column x.
void Merge_col(int x,int y){
    for (it=col[y].begin();it!=col[y].end();it++){
        mp[*it][x]=mp[*it][y];
        col[x].insert(*it);
        row[*it].erase(y);
        row[*it].insert(x);
    }
}

int main(){
    int n,m1,m2,h=0,i;
    scanf("%d%d%d",&n,&m1,&m2);
    for (i=1;i<=n;i++){
        fa1[i]=fa2[i]=i;
    }
    for (i=1;i<=m1;i++){
        int x,y;
        scanf("%d%d",&x,&y);
        int p=getfa(fa1,x),q=getfa(fa1,y);
        fa1[p]=q;
    }
    for (i=1;i<=m2;i++){
        int x,y;
        scanf("%d%d",&x,&y);
        int p=getfa(fa2,x),q=getfa(fa2,y);
        fa2[p]=q;
    }
    if (m1<m2){                 // keep the forest with fewer components as "rows"
        swap(fa1,fa2);
    }
    for (i=1;i<=n;i++){
        int p1=getfa(fa1,i),p2=getfa(fa2,i);
        mp[p1][p2]=i;
        row[p1].insert(p2);
        col[p2].insert(p1);
    }
    for (i=1;i<=n;i++){
        if (getfa(fa1,i)==i){
            rows.insert(make_pair((int)row[i].size(),i));
        }
    }
    // Each iteration joins the two smallest rows with one new edge.
    while (rows.size()>1){
        int x=rows.begin()->second;           // restored "->" lost in extraction
        rows.erase(rows.begin());
        int y=rows.begin()->second;           // restored "->" lost in extraction
        rows.erase(rows.begin());
        if (row[x].size()<row[y].size()){     // merge smaller row into larger
            swap(x,y);
        }
        it=row[x].begin();
        int a=*it,b=*row[y].begin();
        if (a==b){                            // pick distinct columns so the edge joins two forest-2 components too
            a=*++it;
        }
        Ans[++h]=make_pair(mp[x][a],mp[y][b]);
        if (col[a].size()<col[b].size()){
            swap(a,b);
        }
        Merge_row(x,y);
        Merge_col(a,b);
        rows.insert(make_pair((int)row[x].size(),x));
    }
    printf("%d\n",h);
    for (i=1;i<=h;i++){
        printf("%d %d\n",Ans[i].first,Ans[i].second);
    }
    return 0;
}
Tutorial is loading...
solution
#include <bits/stdc++.h>
#define maxn 100086
using namespace std;
const int p = 998244353;
int n, m;
int l[maxn], r[maxn];
int f[maxn], sum[maxn];       // f: current knapsack DP layer, sum: its prefix sums

// Count arrays with l[i] <= a[i] <= r[i], every a[i] divisible by d, and total
// sum <= m. Knapsack DP over multiples of d, each layer in O(m/d) via prefix sums.
int cal(int d){
    int M = m / d;
    f[0] = 1;
    for(int i = 1;i <= M;i++) f[i] = 0;
    for(int i = 1;i <= n;i++){
        int L = (l[i] + d - 1) / d, R = r[i] / d;   // restored '-' lost in extraction (ceil/floor division)
        if(L > R) return 0;                         // no multiple of d inside [l[i], r[i]]
        for(int j = 0;j <= M;j++) sum[j] = (f[j] + (j ? sum[j - 1] : 0)) % p;
        // new f[j] = sum of the previous layer over window [j - R, j - L]
        for(int j = 0;j <= M;j++){
            f[j] = ((j - L >= 0 ? sum[j - L] : 0) + p - (j - R - 1 >= 0 ? sum[j - R - 1] : 0)) % p;
        }
    }
    int ans = 0;
    for(int i = 1;i <= M;i++) ans = (ans + f[i]) % p;
    return ans;
}

int prm[maxn], cnt, mu[maxn];   // primes from the sieve, their count, Mobius function mod p
bool tag[maxn];                 // composite marks for the linear sieve

// Problem E: by Mobius inversion over the gcd, answer = sum_d mu(d) * cal(d).
int main(){
    scanf("%d%d", &n, &m);
    for(int i = 1;i <= n;i++) scanf("%d%d", &l[i], &r[i]);
    mu[1] = 1;
    // Linear sieve computing mu modulo p (mu = -1 is stored as p - 1).
    for(int i = 2;i <= m;i++){
        if(!tag[i]) prm[++cnt] = i, mu[i] = p - 1;        // restored '-' lost in extraction
        for(int j = 1;j <= cnt && prm[j] * i <= m;j++){
            tag[i * prm[j]] = true;
            if(i % prm[j]) mu[i * prm[j]] = (p - mu[i]) % p;  // mu(i * p_j) = -mu(i)
            else break;                                       // p_j^2 divides i*p_j => mu = 0 (array default)
        }
    }
    int ans = 0;
    for(int i = 1;i <= m;i++) ans = (ans + 1ll * mu[i] * cal(i)) % p;
    printf("%d", ans);
}
Am i only here who was too lazy to think about better solution at D1 than just writing full DSU?
Nope :D
why bother writing when you have your own code library :p

That's bad strategy mate. When u practice u write from scratch, when u r participating in contest u don't.
I know, that's my failure. That's why I just got +25 to my rating after solving 4 tasks :(
well, i did bad job at solving B, C too,I would consider it as bad contest(performance wise) but participating after one month without any practice so it was ok.
you werent lazy , i just copied dsu , and did some changes in code :D
maby yes ^_^
This was a lovely round. Thanks a lot to the problem setters (◍•ᴗ•◍)❤.
"Mocha and Math" problem editorial:
Well, if we set x = 0 initially, the final result will always be 0.
yes, it is wrong. we can't set it 0 initially.
It should be "x = ~0U" in C++
whats that?
'~' is the operator fliping every bit of an integer. For example, ~0U = 4294967295 .
Ah! ZYT!
x = -1 also does the same thing.
We can initialize x as a[0] and then iterate from i=1 to n-1
Yes, we can
Why are we doing that though ? Could not understand the solution explanation for A.
ya it is a mistake in writing only...the code is correct
I set it to -1
It is now fixed; thank you!
I just make for loop in powers of two 125988309
Here's how to solve E without knowledge of Mobius function
I have a different approach to problem D2, which is simpler in my opinion.
First, try to add all edges $$$(1, x)$$$ for each $$$x$$$, after that, all nodes are in the component of node $$$1$$$ in at least one of the two trees.
If they are in the same component of node $$$1$$$ in both trees, we won't add edges from that node, since all nodes are in the same component than it in at least one tree.
Now we will consider nodes of two types, the ones that are in the same component than $$$1$$$ in the first tree, and the ones that are in the same component than $$$1$$$ in the second tree. We will store all nodes of type 1 in a stack $$$p1$$$, and all nodes of type $$$2$$$ in a stack $$$p2$$$, and we will try to match them with the following algorithm.
If the top of $$$p1$$$ is in the same component than $$$1$$$ in both trees, delete it
If the top of $$$p2$$$ is in the same component than $$$1$$$ in both trees, delete it
Otherwise, add an edge between the top of $$$p1$$$ and the top of $$$p2$$$.
It is possible to show that this algorithm will add the same number of edges as the one explained in D1's editorial.
The complexity is almost $$$O(n+m)$$$, since the solution only uses two DSU's, and stacks.
My implementation
My Solution is almost the same as yours, except I just maintain two pointers $$$p1,p2$$$: $$$p1$$$ is the smallest index in the same component as $$$1$$$ in first forest but not in second forest; $$$p2$$$ is the smallest index in the same component as $$$1$$$ in second forest but not in first forest. Keep adding edges $$$(p1,p2)$$$ until no edges can be added.
Thanks for this approach, but one more insight will clear it further,
When we have greedily added all possible edges with 1, now it is sure that if 2 nodes aren't connected in the final forest till now, then either of them must already connected to 1 in the separate graphs (126097226 line 162).
Instead of
We can just do
Thanks for selfexplanatory code !!
May I put and translate this lovely solution in my blog?
I'll certainly leave the link.
Sure, glad to see that you liked it.
Hey humbertoyusta, I din quite get how the stack part works... Could you pls explain it in detail ? i.e. how does taking all the elements in mocha's tree which does not belong to the same component as 1 and then randomly selecting a node from diana's forest which also dont belong to the same component as 1, works ? i.e. basically i believe even if we randomly permute the elements within each stack, it will work. Could you elaborate this part ?
You just need to try to match all the nodes who are in the same component than node 1 in the first tree, with the nodes who are in the same component than node 1 in the second tree.
You can do it in any order, due to the proof in D1's editorial.
So you just delete nodes who are in the same component than 1 in both trees(since is impossible to add an edge from them), and try to match the rest.
The way I implemented it, was maintaining two stacks, and trying to add an edge between the tops of the stacks is possible, otherwise delete the tops who are in the same component than 1 in both trees, for more detail you can check my implementation in the comment above.
Yeah. Cool. Got it. Thanks
"First, try to add all edges (1,x) for each x, after that, all nodes are in the component of node 1 in at least one of the two trees."
In the first graph: 1 — 3, 3 — 2, and 4 is alone
In the second graph: 1 — 4, 4 — 2, and 3 is alone
Node 2 is in the same component than 1 in both trees, node 3 is in the same component than 1 in first tree, and node 4 is in the same component than 1 in second tree.
In this case you just don't add any edge in this step since condition of every node is in the same component than 1 in at least one tree is already true.
Aaaa alright, I misunderstood the sentence.
Can we solve C using a Topological sort? Tried implementing it but didn't succeed
How would you deal with cycles?
I think we can keep track of the visited nodes and skip them if they've already been visited
What if a node can be visited using 2 different paths?
If you want to use topo sort, I think, we need to apply DFS for each and every node one by one...
Like for(node 1 -> node n) DFS(node)
Correct me If I am wrong.
Edit: It is possible to do it using topo sort... my bad
Tarun_19 solved it.
Link
I tried solving this question with two approaches but both failed
1) Finding the node of the first strongly connected components through which we can traverse all the node of the digraph.
Here
2) After the first approach failed, i just tried brute force since the sum of n over all test cases was 10^4. So after making the graph, i ran a loop from 1 to n+1 and tried which breaks whenever we can traverse all the nodes from the chosen node.
here
Atleast the brute force method should have passed the test cases. I still haven't figured out what is wrong with my code.
C was essentially asking for you to print out a Hamiltonian path in a special directed graph. In contest I remembered about a blog which has a heuristic algorithm for detecting Hamiltonian paths. Not sure if its even supposed to pass....
ORZZZZZZZZ TONY
Slight mistake in A : we need to set x = a[0] rather than 0 .
Accidentally got AC on D2 and couldn't find any similar solution 126013984
Q1: Is there any test that can turn AC to TL?
Q2: If not, can you give me some explanation (proof) why a and b is small enough?
Explanation:
1) I'm trying to connect all components with the component which contains vertex 0
2) For some vertices we obviously cannot do it. For example in test case 3 we get only 4 edges instead of 5
3) If we look closely on the vertices that we couldn't connect with vertex 0 there is a similarity between them. Both of them are separate components (Perhaps I unclearly express what I want to say. You can uncomment debug in my submission and look at DSUnions in the end of step 1 of explanation)
4) If a = count of such vertices in graph 1 and b = same for the 2nd graph. We can try to connect them for O(a * b)
I supposed that a * b is small that's why I submitted it.
This solution got 1.5s which is close to time limit which means that a * b is not as small as I expected.
I solve D with bitset .
you can save a bitset for each component , which is the set of elements which is not in this component .
when merge two components , you can simply "AND" two bitsets .
But the memory is $$$O(n^2/w)$$$ . You can do a simple optimization :
if the number of elements in the component is $$$\leq B$$$ , just use a vector to save all the elements .
otherwise , use a bitset .
Then you can consider 1,2,3...n , and check whether you can add an edge connecting i and a vertex not in the component .
So the memory is $$$O(n*n/(w*B)+n)$$$.
And the time complexity is $$$O(n*n/w+n*n/B+n*B)$$$,you can let B = $$$\sqrt n$$$ ,then it can pass.
solution : 125997167
If there is a valid vertex to join, then after AND operation there must be several bits left. How did you know at what positions those bits are?
Use _Find_fisrt and _Find_next in $$$O(n/w)$$$.
What's the point in making 256MB memory limit in a space complexity $$$O(n\log n)$$$ problem? It's really confusing. I was afraid of MLE on system tests so I resubmitted, losing rank 1 :(
Also I don't understand why E is even not rejected. It's "yet another trivial and straightforward gcd counting problem".
Sorry for trouble caused.
In problem D2, we think that the space complexity $$$O(n\log n)$$$ isn't that large when $$$n\le 10^5$$$. My submission only uses 26700KB and among all the testers the largest memory usage is 89200KB. So we just set a standard 256MB memory limit.
As for problem E, we didn't expect that it would be a trivial problem. That's why we say 'Sorry for my mistake in estimating the difficulty of problem D2 and E.' in the previous blog.
As the queue is too long, I didn't know I got a WA on E until the contest finished.
It's so sad that I got everything in E right except the last part. I didn't compute i from 1 to M but only the factors of M (cuz of some wrong thoughts when I first read the problem I saw the sum must be M instead of sum not greater than M) and I got wrong answer on sample test 3. I thought the complexity is gonna be $$$O(nM^2)$$$ if I loop i from 1 to M and forgot that for high values of i the complexity of the dp will also decrease. I can't think of solution for D2 so I skipped D1 as well to do E since I have rough idea of E but in the end I failed to complete both D and E...
A slightly alternative solution for E.
We can already count the dynamics from the editorial , but we will slightly change the calculation of the answer. Let's calculate for each $$$i$$$ $$$(1 \leq i \leq m)$$$ how many different primes there are in its decomposition, and is it true that each prime number is included in the power of $$$1$$$, if not, then we will not take it into account. Then if the number includes an even number of primes, then we will take it in the answer with a plus sign, otherwise with a minus sign.
Well, it works, since we just wrote the formula for inclusions of exceptions
This is the Mobius function, your solution is exactly the same as the editorial.
Except for D1 it was a good contest.
Show sympathy to those who got passed pretest in problem A and failed in system test cases by upvoting and downvote if you have passed.....
Nice problems and bad pretests :)
excellent Round,the statements are clear and the problems are interesting.
my first contest in cf. Really enjoyed it even only worked out A — D1. Thanks very much for the excellent problems and answers!
Thank You so much, from the last two questions, I will learn something new.
Nice problem, but bad pretest :(
I think there is a typo in the solution of the first question of "Mocha and Math". I think it should be x = 1 initially instead of 0.
First submission for D1 during contest, WA: 126009094
Post contest submission, AC after removing union by size: 126046481
Can anyone tell me why this is happening?
IN Mocha and math how could i have observed that i had to iterate over all the array to minimize the maximum value??? can someone explain me.
cause AND can't be more than the number. so just AND all
Can anyone please explain me why can't we do
min_element & max_element
to get the ans in problem A ?Consider taking the test case as 11 7 14 3 7 here the answer will be 2
yeah but max = 14 and min = 3 and 14 & 3 is also 2
Consider taking 15 7 14 3 7 here also the answer is 2, whereas 15&3 is 3
A bit in the answer would be ON if it is ON in all the numbers in the given array. This is because whichever two numbers are ANDed together that bit(the one ON in all the numbers) can never be made 0. Just because a bit is ON in the max_element and the min_element it doesn't mean it would be ON in the other numbers too. For eg: [15, 8, 7], 15&7 is 7, but the correct answer is 0 which is the AND of all numbers.
Make sense.. Thanks for the explanation.
How about this test case:
Consider the array [1, 2,5] max=5, min=1 5&1 = 1 5&2 = 0 So the answer is to AND all the elements of the array i.e; 1&2&5=0 My Code: https://codeforces.com/contest/1559/submission/125957676
then i guess the 2nd greater element would still be greater then the bitwise and of max and min
Can somebody explain what is happening in problem A. What is the role of intervals [l,r] . Why didn' it use two pointer approach like we have to iterate from the front and back as well. Why this editorial just & every a[i] ?
What does minimize the maximum value meant? Does & operator minimize value?
The result of performing & operator on any two numbers is always less than or equal to the minimum of the two numbers, since the bits can only turn off. "Minimise the maximum value" meant that the largest element in the array should be as less as possible, if that makes sense.
Problem E:
We can compute it in O(nm) by Knapsack DP optimized by prefix sums.
Bazoka13 can you please explain how prefixsum is used here?
suppose you have calculated dp[i][j] for 0<=j<=m
dp[i+1][j] = dp[i][j] + dp[i][j-1] + .... + dp[i][j-cap[i+1]];
here cap[i+1] is upper bound on variable i+1
Thanks can you elaborate more
suppose you have following task: given array $$$a$$$, and you want to do fast multiple times following action: increase value by $$$x$$$ all $$$a_i$$$ within range $$$[l, r]$$$. And after you did all those actions, you need to output resulting array $$$a$$$. This is what you need to do for this knapsack problem. dp[i][j] will store how many arrays of length $$$i$$$ have sum $$$j$$$. And, from array of length $$$i$$$ with sum $$$j$$$ you can do arrays of length $$$i+1$$$ with sums $$$[j+l, j+r]$$$. This range is where you need to add variants. And you increase them by same amount $$$dp[i][j]$$$. So, if you can increase values within range fast, you can solve this. Idea is that instead of actual values we will store array of deltas. To be precise, we will store certain array such that if you make prefix sums array from it, you'll get actual values of dp. For example, instead of storing 1,2,3,2 — values, we will store 1,1,1,-1 — deltas. Because we can calculate prefix sums array, and get 1,1+1,1+1+1,1+1+1-1 = 1,2,3,2. Now, if you increase single element of this array, prefix sums array will change all values in suffix. For example 1 2 1 -1 will give 1 3 4 3. So you can increase all values in suffix. Now you can increase within $$$[l, r]$$$ range by increasing value at $$$l$$$ so you'll increase within range $$$[l, \infty)$$$ and decrease value at $$$r+1$$$ so you'll also decrease within range $$$[r+1, \infty)$$$ which results into increase only within range $$$[l, r]$$$. Once all operations done, you calculate actual values by simply calculation of prefix sums, and use it as next dp[i].
Wow Wow Wow, what a great explanation, I know the prefix sum delta trick but I wasn't able to figure out how the dp works, you made it very very clear. Again Thanks A lot.
In the editorial for B what does the following line do? s[i]=s[i-1]^('B'^'R');
s[i-1] can be either 'B' or 'R'. So if it is 'R' then this expression will become 'B', or 'R' otherwise, because if we do X^X it will become 0.
Video Tutorial C:https://www.youtube.com/watch?v=H8GDDIAOVdQ
Solution of problem B is not passing the given test case in problem itself.
Since the constraints for problem B is quite small, I have another approach in $$$O(N^2)$$$ which seems more straightforward to me.
For every letter that is not a $$$\tt{?}$$$, I check whether the letter next to it is colored or not. If it isn’t colored yet, I simply paint it the opposite color of the current one. For example with the string $$$\tt{?R?}$$$, the left and the right letter is painted $$$\tt{B}$$$.
If something like $$$\tt{B?R}$$$ occurs, painting $$$\tt{?}$$$ either $$$\tt{R}$$$ or $$$\tt{B}$$$ results in the imperfectness value equal to 2, so this guarantees to be the optimal solution even if I start at $$$\tt{B}$$$ or at $$$\tt{R}$$$. All to do left is to repeat that process until there is no $$$\tt{?}$$$ left in the string.
What about B???
The "B" string doesn't contain any "?" so the algorithm will end immediately
Another brute force solution is to try to color all segments of '?' using 'BRB' or 'RBR' and pick the one with the least cost. Implementation
It is possible to do 2dfs's and brute force all possible edges in one second???
♂That's amazing♂ contest! Enjoyed it!
Nvm found my mistake.
D1 Video Editorial
D2 Video Editorial
In fact, Mobius function is not necessary to be used in E, as it can be solved by inclusionexclusion principle which is similar to the original solution and easier to understand.
[Problem D1] I dont understand why if we connect random two edge will get the best result.
Firstly, you can't connect nodes if they are in the same tree because so you get a cycle. So the only way of adding edges is connecting trees. If you have two trees, you can connect it by adding only one edge because if you add more you get a cycle. And it doesn't matter what nodes you choose. So the best result you can get by iterating over nodes and checking if they are not in the same tree (in both forests). If so, you connect them.
Hope its helpful
In the E's editorial it's mentioned that we can compute it (the number of integers satisfying only the two conditions as mentioned) in O(nM) by Knapsack DP optimized by prefix sums. I am unable to figure out this part. Can anyone help?
We want to compute the number of arrays with gcd dividing $$$g$$$. Therefore, we are only allowed to have numbers $$$i\cdot g$$$, for $$$1 \le i \le \lfloor \frac{m}{g} \rfloor$$$. The sum of numbers in the array is also a multiple of $$$g$$$. From each range $$$[L, R]$$$ we are only allowed to have numbers $$$i \cdot g$$$, where $$$\lceil \frac{L}{g} \rceil \le i \le \lfloor \frac{R}{g} \rfloor$$$. Therefore, each segment $$$[L, R]$$$ can be transformed into a segment $$$[\lceil \frac{L}{g} \rceil, \lfloor \frac{R}{g} \rfloor ]$$$. Consider $$$knapsack[k][j]$$$ — the number of ways to get sum $$$j \cdot g$$$ from the first $$$k$$$ elements. When we add a new element $$$a_{i+1}$$$ with the possible range $$$[\lceil \frac{L}{g} \rceil, \lfloor \frac{R}{g} \rfloor ]$$$, we can see that the state $$$knapsack[i][j]$$$ contributes to states starting from $$$knapsack[i+1][j + \lceil \frac{L}{g} \rceil]$$$ and ending with the $$$knapsack[i+1][j + \lfloor \frac{R}{g} \rfloor]$$$, so we need to add value $$$knapsack[i][j]$$$ to some segment of $$$knapsack[i+1]$$$. We can do this with the difference array technique (see https://codeforces.com/blog/entry/88474?locale=en ). So we compute $$$knapsack[i+1]$$$ as a difference array, and then at the next step we compute prefix sums of this array to get the actual values of $$$knapsack[i+1]$$$ and proceed to computing $$$knapsack[i+2]$$$. Note that, as always, we can only store two layers. See my code for more details: https://codeforces.com/contest/1559/submission/126027569
Another approach for the knapsack DP part is to treat it as a problem of multiplying polynomials. For the case where we want sequences of gcd >= 1 the polynomial of the interval $$$[l_{i}, r_{i}]$$$ is $$$x^{l_{i}} + x^{l_{i}+1} + \ldots + x^{r_{i}}$$$. To get the number of sequences where the gcd>=1 and the sum <= m you multiply all these polynomials and sum the coefficients of the terms for powers <= $$$x^{m}$$$. You can try doing this with NTT but it's too slow. The key observation is $$$x^{l_{i}} + x^{l_{i}+1} + \ldots + x^{r_{i}} = \frac{x^{l_{i}}-x^{r_{i}+1}}{1-x}$$$. This means if you have the current polynomial $$$p(x)$$$ and you want to multiply by the polynomial for $$$[l_{i}, r_{i}]$$$ you can do it in $$$O(m)$$$ by multiplying by $$$x^{l_{i}}-x^{r_{i}+1}$$$ then dividing by $$$1-x$$$. The case where you want gcd>=g is the same but you replace the interval $$$[l_{i}, r_{i}]$$$ with $$$[\lceil l_{i}/g \rceil, \lfloor r_{i}/g \rfloor]$$$ and $$$m$$$ with $$$\lfloor m/g \rfloor$$$. 126171468
my solution for 1559B import java.util.Scanner;
public class bsoln {
} Can anyone tell me whats my mistake as its showing wrong answer in testcase2
Any proof in D1 that this is sufficient ? merging each possible two trees when they're not already merged in the other graph (Diana's) how can we be sure that working like this greedily will not affect the next step badly ?
You always have the option to connect two nodes if none of the two forests is a tree. The proof is in the tutorial:
In the final situation, if one forest has more than one tree, we choose two trees from it, such as tree A and tree B. Then we consider node a in A and node b in B, they must be connected in another forest. We can easily find node b is connected with all the nodes in A and node a is connected with all the nodes in B. So nodes in A and B are in the same tree in another forest. If we consider other trees, we can get the same conclusion. Hence nodes in another forest form only one tree.
So if you can't find two nodes to merge, that means you achieved the optimal answer.
So for like two trees A,B... no matter how many ways there are to merge them their case is totally independent from everything will happen in later steps in other trees but why exactly ? the tutorial doesn't explain this point clearly
Let's say $$$a$$$ and $$$b$$$ are the numbers of components in the corresponding forests. Each time you add an edge, the number of components in both forests decreases by one, no matter which edge you pick. So, after the first edge, the numbers of components are $$$a-1$$$ and $$$b-1$$$, after the second edge $$$a-2$$$ and $$$b-2$$$. Then after you add $$$\min(a, b) - 1$$$ edges you will get a tree.
Although which edge you pick may affect later options, it won't make your answer worse.
The tutorial proved that if you can't find an edge to add, one of the forests is always a tree. Obviously, if one forest becomes a tree, it is the optimal answer.
So you can greedily add any edge you can add until you can't add any more. And when you can't add an edge, you achieved the optimal answer.
Alternative solution for 1559D2  Mocha and Diana (Hard Version).
Consider following question. How to find for fixed vertex $$$v$$$ all vertices $$$u$$$ which if you add this single edge both graphs will not have any cycles. In first graph $$$u$$$ should not be connected to $$$v$$$. In second graph it also should not be connected to $$$v$$$. Lets do dfs from $$$v$$$ in first graph and mark all vertices reachable from $$$v$$$ in first graph. Similarly, lets do dfs from $$$v$$$ in second graph. Now, you can connect $$$v$$$ to $$$u$$$ if $$$u$$$ is not marked in both graphs. Lets make array $$$c$$$ which will store in how many graphs we marked vertex. So $$$c_v$$$ would be 2 if vertex $$$v$$$ was marked in both graphs and $$$c_v$$$ would be 0 if vertex $$$v$$$ was not marked at all. With this array filled correctly, we can tell that any vertex $$$u$$$ with $$$c_u = 0$$$ can be connected with edge $$$(v, u)$$$.
Now, following algorithm: for each vertex $$$v$$$ from $$$1$$$ to $$$n$$$ fill $$$c$$$ (mark vertices) then find any single zero, add single corresponding edge, and repeat, while there is zero. If there is no zero, continue with next vertex. This will work in $$$O(n^2)$$$ time because for each vertex we run two dfs in $$$O(n)$$$, which is $$$O(n^2)$$$, and also we can add at most $$$n$$$ edges which results into additional $$$2n$$$ dfs, so total time complexity is $$$O(n^2+2\cdot n^2)$$$ which is $$$O(n^2)$$$. Note: I didn't prove here that it will give optimal answer.
Notice, that when you add edge and refill marks with same $$$v$$$ you just add additional marks for additional components in both graphs. Instead of clearing all marks, you can just add those which appeared caused by new edge. Using this observation, we can add as many edges for single vertex $$$v$$$ in $$$O(n)$$$ time, because we can't mark more vertices than whole graph. Trouble is: what to do when we need to continue to next $$$v$$$? It may be in other component and we would need to refill $$$c$$$ from scratch.
The idea is following: we can walk along all vertices in single component in first graph, and when we add edge, we can just add vertices in queue of this walk. Then we would not need to clear all marks in first graph until we finish walking in whole component. But then, we still need to redo dfs in second graph each time we change $$$v$$$ (which is within component of first graph). I didn't tell how can we fast get index of 0 in $$$c$$$. We can maintain two sets: $$$s0$$$ with indices of zeroes in $$$c$$$ and $$$s1$$$ with indices of nonzeroes. And each time we change $$$c$$$, move index from one set into another accordingly. Then, when we need to get index of 0 we can pick any number from $$$s0$$$.
Next key observation, is that when we do dfs in second graph from vertex $$$v$$$, we mark some of vertices from component of first graph, and when we reach them, for example some vertex $$$w$$$, we will try to mark same component from second graph again, it will be exactly same component filled from vertex $$$v$$$, but we tried this already when we did dfs from $$$v$$$, and if there was any zero, we would add edge, thus there won't be zero, and we don't have to spend any time, just skip it. In other words, while walking in component of first graph, we should maintain visited vertex in second graph separately with marks in second graph. And as soon as we want to run dfs in second graph from visited vertex in second graph — it won't give any result so just ignore it. After this change, we will have running time $$$O(n \log n)$$$ for single component of first graph, including all added edges we find.
Remaining issue: components of first graph with added edges can be multiple. Last key observation: we don't need to check any other components. First component is enough. We can prove it by contradiction. Suppose there is edge we didn't add because it connects two vertices $$$(u, v)$$$, and both $$$u$$$ and $$$v$$$ is not in component of first graph we checked. Then, there is component of u in first and second graph, and component of v in first and second graph. All those four components should be different to be able to add $$$(u, v)$$$. Then, pick any vertex $$$w$$$ from component that we was checking in first graph. We can add $$$(w, u)$$$ or $$$(w, v)$$$ edge. Because in first graph $$$u, v, w$$$ are disjoint, and in second graph, $$$w$$$ can be reachable either from $$$u$$$ or from $$$v$$$, so we can add edge to unreachable in second graph. Contradiction with assumption that we didn't find. So, we said that single component of first graph is checked in $$$O(n \log n)$$$, and we now proved that we only need to check single component, so this solution run time is $$$O(n \log n)$$$. My messy implementation: 126065463
Python cleaned version: 126093423 Forgot to mention above: when join with visited in second graph we should also stop checking v.
MikeMirzayanov, can we have a feature to favorite comments as well. Some comments like this are indeed insightful and helpful as some problems and blogs as well.
It's already present, you can see the star outline(beside the link to comment(#) option). Click on it.
Should this submission for 1559D2  Mocha and Diana (Hard Version) be accepted? 126011074
What it does is similar to choosing one component in each graph randomly and connecting them if possible each time, and repeating the process until it reaches 1.8s.
After changing it to trying for only 2n times, it also passes the tests. 126093360
If the random part works well, is this a submission that can be hacked or a good solution that proves to be correct most of the time? (I think maybe it's a good solution because of m << 100000^2)
Even simpler solution for D:
If vertices $$$u_1$$$ and $$$v_1$$$ belong to different trees in Mocha's forest and vertices $$$u_2$$$ and $$$v_2$$$ belong to different trees in Diana's forest then one of 6 edges between $$$u_1, v_1, u_2, v_2$$$ can be added. It's pretty easy to prove and we also don't even care if some of them are equal. Now just keep a vertex for every tree in both forests and do this until you only have one tree in one of the forests.
Could you explain your solution a bit more? Obviously adding edges in u1 to u2/v2 i.e. across Diana's and Mocha's forest doesn't make sense.
It does!
If $$$u_1$$$ and $$$v_1$$$ belong to the same tree in Diana's forest (otherwise pair $$$(u_1, v_1)$$$ works) then either $$$u_2$$$ or $$$v_2$$$ is from a different tree from these two. Let's say it's $$$v_2$$$. Then both pairs $$$(u_1, v_2)$$$ and $$$(v_1, v_2)$$$ are from different trees in Diana's forest and at least one of them is from different trees in Mocha's forest as well since $$$v_2$$$ is the same in both pairs but $$$u_1$$$ and $$$v_1$$$ are from different trees.
I have better E solution. It is pretty similar to editorial, but it doesn't use Möbius function and easier to understand.
Let $$$f(k)$$$ be number of sets of integers, which are bounded($$$l_i \leqslant a_i \leqslant r_i$$$ for all $$$i$$$) and sum of $$$a_i$$$ doesn't exceed $$$M$$$ and also for all $$$i$$$, $$$a_i$$$ must be divisible by $$$k$$$. It can be calculated with DP for $$$O\left(n\frac{M}{k}\right)$$$ time and $$$O\left(\frac{M}{k}\right)$$$ space as in editorial.
So calculating $$$f(i)$$$ for all $$$1 \leqslant i \leqslant m$$$ will take $$$O(nM \log M)$$$ time.
Let $$$d(k)$$$ be defined like $$$f(k)$$$, but instead of condition "for all $$$i$$$, $$$k \mid a_i$$$" we use condition "gcd of all numbers is $$$k$$$". Thus by definition, answer is $$$d(1)$$$.
Also it is clear that $$$d(i) = f(i) - (d(2i) + d(3i) + \ldots)$$$, hence all $$$d(i)$$$ can be calculated by this formula in $$$O(M \log M)$$$ because of the harmonic series.
Submission using this solution: 126092551
Kudos to person who set the C problem: Scary but easy. A & B were also very good.
Is the complexity of D1 O(n^2) or O(n^2 * log n) ?
I tried implementing D1 solution after reading editorial, but my solution is getting TLE. Can someone explain to me why does my solution is giving TLE but editorial not.
The only difference is that I used while loop instead of recursive function (getf)
here is my code
the minimal value of the maximum value in the sequence. ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Please any explanation for this sentence I couldn't understand it
+1
In problem D2, can someone explain the this line about the time complexity," This is because an element moves to a new row/column O(logn) times and each move is O(logn) time (using STL set in cpp)" with respect to the operations that are happening. I understood the operations above but not able to figure out the time complexity. To me, it seems more like O(logn)+O(logn).
I think , in a row there can be at most n elements to merge, and each element is merged logn times max, as rows are merged by size (DSU by rank logic) and logn to insert in set each time. so for 1 element (logn)^2 , for n elements n*(logn)^2.
Can someone explain the xor part in B given in the editorial?
That's simple dude its just getting the opposite character from the one present there already from B and R
Unable to understand the code given in D1 can anyone help ??
To detect whether connecting two vertices is possible (there will be no cycle after connection) we can use DSU (disjoint set union, tutorial). Now we assume that for each vertex A and B we know whether we can connect them or not. Just try to brutforce all pairs of vectices. If connection of two vectices in both forests wouldn`t create a cycle add new edge. Example of code: 126156817. Hope this will help you. Good luck :)
Can someone please explain why $$$\sum_{d \mid \gcd(a_1,a_2,...,a_n)}\mu(d) = \sum_{d \mid a_1, d \mid a_2, ..., d \mid a_n}\mu(d)$$$ ? Couldn't wrap around my monke brain :(
$$$d$$$
is a divisor of $$$\gcd(a_1,a_2,...,a_n)$$$, so $$$d$$$
is also a divisor of $$$a_1$$$ and $$$a_2$$$ ... But the right-hand side also includes divisors that aren't divisors of $$$\gcd(a_1,a_2,...,a_n)$$$ — do they cancel each other out?
Can someone help me explain this from problem B
s[i]=s[i-1]^('B'^'R');
What does 'B'^'R' return?? The output of 'B'^'R' is not showing when I run it in C++. s[i]=s[i-1]^('B'^'R')
basically sets s[i] depending upon the value of s[i-1]. If s[i-1] is 'B', s[i] changes to 'R' and if s[i-1] is 'R', s[i] changes to 'B'.'B'
,'R'
in C++ is number equal to ASCII code of symbol. It basically means that char is also integer in C++, it just has fewer bytes and narrow range. You should also know that(x^y)^z = x^(y^z)
. This is basic property of XOR. It's easy to prove by considering bits separately. Then, feed'B'
into formula you get'B'^('B'^'R') = ('B'^'B')^'R' = 0^'R' = 'R'
. Also, you should know thatx^y = y^x
, so if you feed'R'
into formula you get'R'^('B'^'R') = ('B'^'R')^'R' = 'B'^('R'^'R')='B'^0='B'
@r57shell Thank you so much; you explained so nicely it literally cleared all the doubts. Also, may I know how one can think towards using a xor in these types of questions.
Looking up properties of XOR can yield all the above mentioned stuff. This maybe useful "Save yourself a register" at https://accu.org/journals/overload/20/109/lewin_1915/
thanks, akshpan appreciated
i was trying to solve d1 using union find, that is, i pick 2 nodes i and j, if they share same parent in the first forest or the second forest, we can't make this connection, because then we would be forming a loop, else, make that connection, make node j have same root as node i
was my approach wrong?
DSU doesn't work like this. If you want to join two sets that have nodes $$$i$$$ and $$$j$$$, you have to find the leaders (roots) of each set and update their parents, not for $$$i$$$ and $$$j$$$.
you are right, i already knew this, this is a crucial implementation detail i forgot for some reason when i was writing the solution, thanks
Problem E can be solved in $$$O(m \log^2 m \log n)$$$ if $$$l_i$$$ and $$$r_i$$$ are constant in each case.
How
ios_base::sync_with_stdio(false);cin.tie(0);cout.tie(0) Please explain me brothers what does this mean?
Just use it as it is, it makes input/output operations faster, which results in faster runtime. Just use it without knowing what is it(abstraction) and believing it does ur job!!
Thanku my friend. ;)
Can anyone please explain why Topological sort doesn't work on 1559C. I am not able to think any test case where it doesn't work.
Can anyone provide a more elaborative solution to problem B (Mocha and Red and Blue)?