Idea: adedalic
Tutorial
Solution (adedalic)
#include <bits/stdc++.h>
using namespace std;

int main() {
    int t; cin >> t;
    while (t--) {
        int k; cin >> k;
        cout << 100 / gcd(100, k) << endl;
    }
    return 0;
}
1525B - Permutation Sort
Idea: BledDest
Tutorial
Solution (Neon)
#include <bits/stdc++.h>
using namespace std;

int main() {
    int t;
    scanf("%d", &t);
    while (t--) {
        int n;
        scanf("%d", &n);
        vector<int> a(n);
        for (int &x : a) scanf("%d", &x);
        int ans = 2;
        if (is_sorted(a.begin(), a.end()))
            ans = 0;
        else if (a[0] == 1 || a[n - 1] == n)
            ans = 1;
        else if (a[0] == n && a[n - 1] == 1)
            ans = 3;
        printf("%d\n", ans);
    }
}
1525C - Robot Collisions
Idea: BledDest
Tutorial
Solution (awoo)
#include <bits/stdc++.h>

#define forn(i, n) for (int i = 0; i < int(n); i++)

using namespace std;

struct bot {
    int x, d;
};

int main() {
    int t;
    cin >> t;
    forn(_, t) {
        int n, m;
        scanf("%d%d", &n, &m);
        vector<bot> a(n);
        forn(i, n) scanf("%d", &a[i].x);
        forn(i, n) {
            char c;
            scanf(" %c", &c);
            a[i].d = c == 'L' ? -1 : 1;
        }
        vector<int> ord(n);
        iota(ord.begin(), ord.end(), 0);
        sort(ord.begin(), ord.end(), [&a](int x, int y) {
            return a[x].x < a[y].x;
        });
        vector<int> ans(n, -1);
        vector<vector<int>> par(2);
        for (int i : ord) {
            int p = a[i].x % 2;
            if (a[i].d == -1) {
                if (par[p].empty())
                    par[p].push_back(i);
                else {
                    int j = par[p].back();
                    par[p].pop_back();
                    ans[i] = ans[j] = (a[i].x - (a[j].d == 1 ? a[j].x : -a[j].x)) / 2;
                }
            }
            else {
                par[p].push_back(i);
            }
        }
        forn(p, 2) {
            while (int(par[p].size()) > 1) {
                int i = par[p].back();
                par[p].pop_back();
                int j = par[p].back();
                par[p].pop_back();
                ans[i] = ans[j] = (2 * m - a[i].x - (a[j].d == 1 ? a[j].x : -a[j].x)) / 2;
            }
        }
        forn(i, n) {
            printf("%d ", ans[i]);
        }
        puts("");
    }
    return 0;
}
1525D - Armchairs
Idea: BledDest
Tutorial
Solution (BledDest)
#include <bits/stdc++.h>
using namespace std;

const int INF = int(1e9);

int main()
{
    int n;
    cin >> n;
    vector<int> a(n);
    for (int i = 0; i < n; i++)
        cin >> a[i];
    vector<int> pos;
    for (int i = 0; i < n; i++)
        if (a[i] == 1)
            pos.push_back(i);
    int k = pos.size();
    vector<vector<int>> dp(n + 1, vector<int>(k + 1, INF));
    dp[0][0] = 0;
    for (int i = 0; i < n; i++)
        for (int j = 0; j <= k; j++)
        {
            if (dp[i][j] == INF) continue;
            dp[i + 1][j] = min(dp[i + 1][j], dp[i][j]);
            if (j < k && a[i] == 0)
                dp[i + 1][j + 1] = min(dp[i + 1][j + 1], dp[i][j] + abs(pos[j] - i));
        }
    cout << dp[n][k] << endl;
}
1525E - Assimilation IV
Idea: BledDest
Tutorial
Solution (adedalic)
#include <bits/stdc++.h>
using namespace std;

#define fore(i, l, r) for (int i = int(l); i < int(r); i++)
#define sz(a) int((a).size())
#define x first
#define y second

typedef long long li;
typedef pair<int, int> pt;

const int MOD = 998244353;

int norm(int a) {
    while (a >= MOD)
        a -= MOD;
    while (a < 0)
        a += MOD;
    return a;
}

int mul(int a, int b) {
    return int(a * 1ll * b % MOD);
}

int binPow(int a, int k) {
    int ans = 1;
    while (k > 0) {
        if (k & 1)
            ans = mul(ans, a);
        a = mul(a, a);
        k >>= 1;
    }
    return ans;
}

int inv(int a) {
    return binPow(a, MOD - 2);
}

vector< vector<int> > d;
int n, m;

inline bool read() {
    if (!(cin >> n >> m))
        return false;
    d.resize(n, vector<int>(m));
    fore (i, 0, n) fore (j, 0, m)
        cin >> d[i][j];
    return true;
}

inline void solve() {
    int invFact = 1;
    fore (i, 1, n + 1)
        invFact = mul(invFact, i);
    invFact = inv(invFact);
    int E = 0;
    fore (j, 0, m) {
        vector<int> cnt(n + 1, 0);
        fore (i, 0, n)
            cnt[n + 1 - d[i][j]]++;
        vector<int> d(n + 1, 0);
        d[0] = 1;
        int rem = 0;
        fore (i, 0, n) {
            rem += cnt[i];
            d[i + 1] = norm(d[i + 1] + mul(d[i], rem));
            rem = max(0, rem - 1);
        }
        // cerr << d[n] << " - " << norm(1 - mul(d[n], invFact)) << endl;
        E = norm(E + 1 - mul(d[n], invFact));
    }
    cout << E << endl;
}

int main() {
#ifdef _DEBUG
    freopen("input.txt", "r", stdin);
    int tt = clock();
#endif
    ios_base::sync_with_stdio(false);
    cin.tie(0), cout.tie(0);
    cout << fixed << setprecision(15);
    if (read()) {
        solve();
#ifdef _DEBUG
        cerr << "TIME = " << clock() - tt << endl;
        tt = clock();
#endif
    }
    return 0;
}
1525F - Goblins And Gnomes
Idea: BledDest
Tutorial
Solution (BledDest)
#include <bits/stdc++.h>
using namespace std;

const int N = 543;
const long long INF = (long long)(1e18);

struct Matching
{
    int n1, n2;
    vector<set<int>> g;
    vector<int> mt, used;
    void init()
    {
        mt = vector<int>(n2, -1);
    }
    int kuhn(int x)
    {
        if (used[x] == 1) return 0;
        used[x] = 1;
        for (auto y : g[x])
            if (mt[y] == -1 || kuhn(mt[y]) == 1)
            {
                mt[y] = x;
                return 1;
            }
        return 0;
    }
    int calc()
    {
        init();
        int sum = 0;
        for (int i = 0; i < n1; i++)
        {
            used = vector<int>(n1, 0);
            sum += kuhn(i);
        }
        return sum;
    }
    void remove_vertex(int v, bool right)
    {
        if (right)
        {
            for (int i = 0; i < n1; i++)
                g[i].erase(v);
        }
        else
            g[v].clear();
    }
    void add_edge(int x, int y)
    {
        g[x].insert(y);
    }
    Matching() {};
    Matching(int n1, int n2) : n1(n1), n2(n2)
    {
        g.resize(n1);
    };
};

int n, m, k;
long long dp[N][N];
int p[N][N];
vector<int> g[N];
long long x[N], y[N];

int main()
{
    cin >> n >> m >> k;
    for (int i = 0; i < m; i++)
    {
        int u, v;
        cin >> u >> v;
        --u;
        --v;
        g[u].push_back(v);
    }
    for (int i = 0; i < k; i++)
        cin >> x[i] >> y[i];
    Matching mt(n, n);
    for (int i = 0; i < n; i++)
        for (auto j : g[i])
            mt.add_edge(i, j);
    int cnt = mt.calc();
    int cur = cnt;
    vector<int> seq;
    while (cur > 0)
    {
        int idx = 0;
        for (int i = 0; i < n; i++)
        {
            Matching mt2 = mt;
            mt2.remove_vertex(i, false);
            if (mt2.calc() < cur)
                idx = i + 1;
            mt2 = mt;
            mt2.remove_vertex(i, true);
            if (mt2.calc() < cur)
                idx = -(i + 1);
        }
        assert(idx != 0);
        seq.push_back(idx);
        mt.remove_vertex(abs(idx) - 1, idx < 0);
        cur--;
    }
    reverse(seq.begin(), seq.end());
    for (int i = 0; i <= k; i++)
        for (int j = 0; j <= cnt; j++)
            dp[i][j] = -INF;
    dp[0][cnt] = 0;
    for (int i = 0; i < k; i++)
        for (int j = 0; j <= cnt; j++)
        {
            if (dp[i][j] == -INF) continue;
            for (int z = 0; z <= j; z++)
            {
                if (i + 1 + z >= n) continue;
                int t = j - z;
                long long add = max(0ll, x[i] - t * y[i]);
                if (dp[i + 1][z] < dp[i][j] + add)
                {
                    dp[i + 1][z] = dp[i][j] + add;
                    p[i + 1][z] = j;
                }
            }
        }
    cur = max_element(dp[k], dp[k] + cnt + 1) - dp[k];
    vector<int> res;
    for (int i = k; i > 0; i--)
    {
        res.push_back(0);
        for (int j = p[i][cur] - 1; j >= cur; j--)
            res.push_back(seq[j]);
        cur = p[i][cur];
    }
    reverse(res.begin(), res.end());
    cout << res.size() << endl;
    for (auto x : res) cout << x << " ";
    cout << endl;
}
Thanks for this nice and clear tutorial
The author's implementation of task C is much better than mine.
Thanks, that's why we didn't question it being C.
Finally, the editorial is out.
In Problem D I find this sentence very confusing:
$$$ $$$ "let $$$dp_{i, j}$$$ be the minimum time if we considered $$$i$$$ first positions and picked $$$j$$$ of them as the ending ones."
What does "pick $$$j$$$ of them (them='$$$i$$$ positions'?) as the ending ones" mean? Shouldn't it be:
$$$ $$$ " let $$$dp_{i, j}$$$ be the minimum time if we considered $$$i$$$ first starting positions and $$$j$$$ first ending positions."
This state represents a situation when we've considered the armchairs $$$0, 1, 2, \dots, i - 1$$$ and exactly $$$j$$$ of them are chosen as ending positions (so, after the whole process is done, exactly $$$j$$$ people will sit on the segment $$$[0, i - 1]$$$ of the armchairs).
I didn't get it. What exactly is dp[i][j] representing? Can someone help? Thanks!
What dp[i][j] represents is that we have considered the first i seats, and among them the first j initially occupied seats have already had their people assigned to some new position.
I've managed to solve this task now. My understanding/interpretation is not exactly the same as in the tutorial, but maybe this image (which I also drew for myself while solving) will help you.
I counted the ones and the zeros in two separate arrays. The $$$x_i$$$ and $$$y_j$$$ store the index in the original array (so $$$x_1=0$$$ and $$$y_2=2$$$ in my picture). Then $$$i$$$ tells us how many people we chose, $$$j$$$ tells us how many empty chairs we chose. And $$$dp_{i,j}$$$ tells us the cheapest solution to place those $$$i$$$ people on those $$$j$$$ chairs.
Some examples:
$$$dp_{1,1}$$$ chooses only one person $$$x_1$$$ and one chair $$$y_1$$$. What is the best solution in this case? It's $$$abs(x_1-y_1)$$$, since we have only one possibility. Same with e.g. $$$dp_{5,5}$$$: we place the first 5 people on the first 5 chairs. There is only one possibility.
$$$dp_{1,max(j)}$$$ chooses only one person $$$x_1$$$ and all empty chairs. What is the best solution in this case? For each pair of $$$x_1$$$ and some chair $$$y_j$$$ we measure the distance. The smallest distance is the answer.
$$$dp_{4,3}$$$ chooses 4 people and tries to seat them on 3 chairs. This is impossible, so we assign the value $$$\infty$$$ here.
Now the interesting part is the transition. How would we calculate $$$dp_{3,5}$$$? We want to find the cheapest solution to place 3 people on 5 chairs. We compare two steps. We could try $$$dp_{3,4}$$$, because it is also a solution for $$$dp_{3,5}$$$ (not necessarily the optimal one): it is a possible way to place 3 people on 4 places, so it's also a valid distribution of those 3 people on 5 places. Or we could try $$$dp_{2,4}$$$ and add person $$$x_3$$$ on place $$$y_5$$$. This has the cost $$$dp_{2,4} + abs(x_3-y_5)$$$. This way we obtain: $$$dp_{3,5}=min(dp_{3,4} \,; dp_{2,4} + abs(x_3-y_5))$$$. This relation is enough to obtain the solution, which will be $$$dp_{max(i),max(j)}$$$.
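A minimal sketch of this recurrence (a rough reconstruction of the idea above, not a reference implementation; here x holds the 0-indexed positions of the people and y the positions of the empty chairs):

#include <bits/stdc++.h>
using namespace std;

int main() {
    int n;
    cin >> n;
    vector<int> a(n), x, y;
    for (int i = 0; i < n; i++) {
        cin >> a[i];
        (a[i] == 1 ? x : y).push_back(i);              // x = people, y = empty chairs
    }
    int p = x.size(), q = y.size();
    const long long INF = 1e18;
    // dp[i][j] = cheapest way to seat the first i people using only the first j chairs
    vector<vector<long long>> dp(p + 1, vector<long long>(q + 1, INF));
    for (int j = 0; j <= q; j++) dp[0][j] = 0;         // seating nobody costs nothing
    for (int i = 1; i <= p; i++)
        for (int j = i; j <= q; j++) {                 // fewer chairs than people stays infinity
            dp[i][j] = dp[i][j - 1];                   // option 1: chair j is left unused
            if (dp[i - 1][j - 1] != INF)               // option 2: person i sits on chair j
                dp[i][j] = min(dp[i][j], dp[i - 1][j - 1] + abs(x[i - 1] - y[j - 1]));
        }
    cout << dp[p][q] << endl;
}

dp[p][q] here is the $$$dp_{max(i),max(j)}$$$ from the explanation above.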
I really hope this helps and doesn't just confuse even more.
Great help, thx.
That is really a good explanation. I have written AC code following this exact explanation, if anyone is interested.
D
Yes it helped. Thanks
greatly explained !!
Wouldn't there be a case where, say, we shift index i+1 to i+2 and then move i to i+1? Here I think we are only considering the spaces that are initially blank?? @OleschY
That's why you have to update the $$$dp_{i,j}$$$ in a specific order (as is customary for DP-Solutions). More specifically, $$$dp_{i,j}$$$ depends on $$$dp_{i,j-1}$$$ and $$$dp_{i-1,j-1}$$$. So the two latter values have to be updated before $$$dp_{i,j}$$$. You can achieve this e.g. by iterating $$$i$$$ from $$$1$$$ to $$$max(i)$$$ in the outer loop and iterating $$$j$$$ from $$$i$$$ to $$$max(j)$$$ in the inner loop. Then it all works out!
wow, this was a brilliant explanation. Thanks a ton :)
Very crystal clear explanation brdr... this is exactly how I defined the states, but I was having a problem writing the transition... thanks a lot!!
Ah, I think D is easier than C.
Maybe ABDCEF is a better choice.
I think E is also easier than C. If you're familiar with expectations and probabilities, you can solve it easily.
Can you list some tutorials or articles?
This is a good one.
In problem B, if the question were changed a little bit so that instead of rearranging the elements of a subarray we had to choose some subarray and reverse it, then what would the approach be for that problem?
Check the Code Jam 2021 qualification round; you will find the exact problem with analysis.
It took over 24 hours for the Editorial to appear :)
The problems were cool, tho... I really enjoyed that round.
Thanks for the clean explanation and solution. But I still don't know why I got WA on test 8 for D tho...
Haha, everyone who wrote a greedy solution faced that.
6
0 0 1 1 1 0
This case fails for my greedy solution. I think you must have made the same mistake.
NGL, the solution of C is pretty neat. I hated this problem before reading the editorial.
For Problem E, why do we subtract 1 in the second step, 2 in the third step and so on?
In the 2nd step, the n-th city is fixed. And in the 3rd step, the n-th point and the (n-1)-th city are fixed.
Problem C is so cool.
Does anyone know why some problems use 10^9 + 7 as the mod while other problems use 998244353?
It has to do with FFT, as I understand. See here.
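To make that concrete: $$$998244353 = 119 \cdot 2^{23} + 1$$$ is a prime, so the multiplicative group modulo it contains $$$2^{23}$$$-th roots of unity ($$$3$$$ is a primitive root), which is exactly what NTT-style convolution needs. $$$10^9 + 7$$$ lacks that structure (only $$$2^1$$$ divides $$$10^9 + 6$$$), so it is typically used when no convolution is involved.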
thanks!
In problem D, if the time taken to move from $$$i$$$ to $$$j$$$ is some function $$$f(i,j)$$$ that is quadratic or of higher degree, then this $$$dp$$$ solution will not work, right? (Because we can't say "the leftmost starting position is matched with the leftmost ending one" for the optimal solution.) How can we solve it in this case?
I'd like to mention that it is well known in China how to solve problem D in O(n) time, and other generalisations of the problem are also studied and known. See This PDF (Problem 2 is problem D). The pdf is in Chinese and I don't have a better resource for it, but people interested can try Google Translate. Maybe you already know it, but I'm posting for those who don't.
Is there anything that is non-standard for Chinese guys?
I hope that they ask the same question in some other contest with bigger constraints: Expected solution O(n). That way, I will learn the method by reading blogs and editorials.
Reading, translating, and understanding a Chinese document seems too difficult for me.
Could you give me some other way to access the pdf without using a VPN in China? I can hardly download it.
Oh, I borrowed someone's VPN account and downloaded it. Thank you for sharing this algorithm!
Even though I'm Chinese I still can't understand it :(
Nice.
Can someone please explain in detail how to solve D using flows? Also, the flows tag has been assigned to the problem in the Problemset.
Make a graph with n+2 nodes, one for each armchair, a source and a sink. Add an edge between adjacent armchairs with cost 1 and infinite capacity. For occupied chairs, add an edge from the source to that chair with cost 0 and capacity 1, same for unoccupied chairs to the sink. Now you can run min-cost max-flow on this.
Can you elaborate on the complexity of this solution?
And how it can pass under those constraints!
So each chair has one edge either from the source, or to the sink, that's $$$N$$$ edges. Additionally, each pair of adjacent chairs has two edges, one in each direction. So that's $$$N + 2(N - 1) = 3N - 2$$$ edges. Kactl's MCMF runs in $$$O(E^2) = O(2 * 10^8)$$$ but I guess the implementation ended up being too slow. AtCoder Library has one that runs in $$$O(F(V+E)\log(V + E))$$$, and was fast enough to pass. Here, $$$F$$$ is the max flow which we know is at most $$$\frac{N}{2}$$$.
In this problem, even an MCMF that uses SPFA will be acceptable, and it isn't hard to prove. There can be at most n/2 edges from the source (occupied chairs), and in the worst case each unit of flow will visit n-1 edges between chairs plus n-1 edges from unoccupied chairs to the sink. So the time complexity will be O( n/2 * (n-1+n-1) ) = O(n^2) = O(2*10^7).
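For reference, here is a sketch of the construction described above, assuming the AtCoder Library (atcoder/mincostflow) is available; armchairs are nodes 0..n-1, S is the source and T is the sink:

#include <bits/stdc++.h>
#include <atcoder/mincostflow>
using namespace std;

int main() {
    int n;
    cin >> n;
    vector<int> a(n);
    for (int i = 0; i < n; i++) cin >> a[i];
    int S = n, T = n + 1;
    atcoder::mcf_graph<int, long long> g(n + 2);
    for (int i = 0; i + 1 < n; i++) {
        // moving between adjacent armchairs costs 1 per unit of flow;
        // capacity n is effectively infinite, since the total flow is at most n/2
        g.add_edge(i, i + 1, n, 1);
        g.add_edge(i + 1, i, n, 1);
    }
    int people = 0;
    for (int i = 0; i < n; i++) {
        if (a[i] == 1) { g.add_edge(S, i, 1, 0); people++; }  // a unit of flow starts at each occupied chair
        else g.add_edge(i, T, 1, 0);                          // and may end at any free chair
    }
    auto [flow, cost] = g.flow(S, T, people);
    cout << cost << endl;                                     // minimum total cost = minimum total time
}

This gives the same $$$3N - 2$$$ edges counted above.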
Heyy,
I don't know much about this topic. Can someone please share some resources or problems from which I can learn more about it?
The solution for C was nice :D. But can someone tell me why I am getting a runtime error on test 4, without any diagnostics, for my solution? My Submission. NVM, I got it. IGNORE :)
For problem D, is there any greedy solution?
In the editorial of problem E, can anyone explain the meaning of this sentence: "Let's for each turn k∈[0,n) calculate the number of cities that you can build Monument in starting this turn as cnt[k]"? What exactly are we storing in the cnt array?
I have the same question
I have added a few comments in the code snippet, which are necessary to understand the explanation further.
For each point $$$j$$$, $$$0 \le j < m$$$ (0-based indexing), $$$cnt[i]$$$ is the number of cities in which we can build a monument on the $$$i^{th}$$$ day such that the $$$j^{th}$$$ point won't be captured by any of the cities counted in $$$cnt[i]$$$.
Can anyone help me find my mistake in my solution of problem D? What I am doing is that, for every position that is already occupied, I first find the closest available place to its left and the closest available place to its right. If one of these two positions is already occupied, then I simply choose the available one, and in case both are available, I simply pick the position which takes the minimum time. This is my code: 116381349
6
0 0 1 1 1 0
This case fails for your solution, and also for almost every greedy solution.
nocriz already mentioned an $$$O(n)$$$ solution for D, but sadly not everyone can read Chinese. I will explain another $$$O(n)$$$ solution, from frodakcin.
Let's look at the resulting solution and analyze it. I claim that the resulting array can be divided into segments such that each segment is one of the following:
1) All cells in the segment are not people and are not new places for them -- useless cells. All segments of this type have length one, and if there are $$$m$$$ people, there are exactly $$$n-2m$$$ such segments.
2) A segment of even length that contains the same number of people as new seats for them, with the people matched to places inside this segment (for example, the segment has length 4, and there are 2 people who moved to 2 places inside it).
To prove this, let's find the first non-useless cell and go to the right, maintaining a balance: each person gives $$$+1$$$, and each taken place gives $$$-1$$$. If we arrive at balance 0, we have obtained a segment of type 2 and can start a new one. If we never arrive at balance 0, there are two cases:
1) We reached the end of the array. That is impossible, because the number of taken places is equal to the number of people.
2) We reached a useless cell with a non-zero current balance. Again, that is impossible, because it would mean that we have a non-zero balance on the segment, so someone (a person or a place) from this segment is matched to something to the right of the empty cell, and that is not an optimal answer (we could match with the empty cell instead).
Now you can notice that there are only $$$O(n)$$$ interesting segments. All segments of type 1 are just single cells, and all segments of type 2 are $$$[l, r]$$$ such that, for each $$$l$$$, $$$r$$$ is minimal with $$$balance([l, r]) = 0$$$. And we have to "construct" our array from these segments. In other words, we can replace a segment $$$[l, r]$$$ with cost $$$c$$$ by an edge from $$$l$$$ to $$$r+1$$$ with cost $$$c$$$ and find the shortest path from $$$1$$$ to $$$n+1$$$. Obviously you can find the shortest path in $$$O(n)$$$ if you know the costs of all edges. The cost of a type-1 edge is $$$0$$$. To calculate the cost of a type-2 edge, let's write it out: it is the sum of $$$abs (person\_position - place\_position)$$$. Since this is the minimal segment with balance $$$0$$$, you can see that all values $$$(person\_position - place\_position)$$$ have the same sign, so you just need $$$abs(sum(person\_positions) - sum(empty\_places\_positions))$$$, and sums of such positions are easy to calculate with prefix sums.
You can look at my submission with some comments here.
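For completeness, a compact sketch of the resulting algorithm (a reconstruction of the idea above, not frodakcin's actual submission): dist[l] is the cheapest cost of building the prefix of length l out of type-1 and type-2 edges.

#include <bits/stdc++.h>
using namespace std;

int main() {
    int n;
    scanf("%d", &n);
    vector<int> a(n);
    for (int &v : a) scanf("%d", &v);
    // prefix sums of positions of people / empty chairs, and the balance prefix
    vector<long long> sumP(n + 1, 0), sumE(n + 1, 0);
    vector<int> bal(n + 1, 0);                    // +1 for a person, -1 for an empty chair
    for (int i = 0; i < n; i++) {
        sumP[i + 1] = sumP[i] + (a[i] == 1 ? i : 0);
        sumE[i + 1] = sumE[i] + (a[i] == 0 ? i : 0);
        bal[i + 1] = bal[i] + (a[i] == 1 ? 1 : -1);
    }
    // nxt[l] = smallest j > l with bal[j] == bal[l], so [l, j-1] is the minimal balanced segment
    vector<int> nxt(n, -1), nearest(2 * n + 1, -1);
    for (int l = n - 1; l >= 0; l--) {
        nearest[bal[l + 1] + n] = l + 1;
        nxt[l] = nearest[bal[l] + n];
    }
    const long long INF = LLONG_MAX / 4;
    vector<long long> dist(n + 1, INF);
    dist[0] = 0;
    for (int l = 0; l < n; l++) {
        if (dist[l] == INF) continue;
        if (a[l] == 0)                            // type-1 edge: leave this empty chair unused
            dist[l + 1] = min(dist[l + 1], dist[l]);
        int j = nxt[l];
        if (j != -1) {                            // type-2 edge: take the minimal balanced segment [l, j-1]
            long long cost = llabs((sumP[j] - sumP[l]) - (sumE[j] - sumE[l]));
            dist[j] = min(dist[j], dist[l] + cost);
        }
    }
    printf("%lld\n", dist[n]);
}

For example, on the array 1 0 0 1 0 0 1 this prints 3.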
I found problem E very interesting. This was the first time I came across a probability-related problem in competitive programming. [still a noob :)]
Can anybody please give a more detailed explanation for D? I am not good with DP, and after reading this tutorial many times I am still stuck. Thanks in advance.
This is my Logic
And this is my Submission:116702763
Thanks, it really helped.
How on earth is E rated just 2100?
Problem 1525C - Robot Collisions is very interesting, and its tutorial is even more interesting. Thanks a lot, BledDest
I am wondering what the solution would be for problem D if the problem were extended to a 2D array
[the moving cost being L1]
If problem D is modeled as a linear sum assignment problem (a bipartite graph with 2n vertices and a connection between vertices i and j+n if a[i] = 1 and a[j] = 0), is there some general linear sum assignment algorithm that doesn't give TLE? I tried successive shortest paths (with O(E*logV) Dijkstra as the intermediate step). I think it solves the general linear sum assignment problem in O(n²*logn), but it gives TLE on test case 31 for this problem (code). So I wonder whether it's a problem in my implementation, or just the fact that the time limit for this problem is tight enough to rule out solutions using a general linear sum assignment algorithm.
I solved that problem with min-cost max-flow, taking care of having E = 3*V.
https://codeforces.me/contest/1525/submission/125517979
In C, how was this formula derived?
Problem F is so goooood ;w;
Here is my thought process on deriving the solution:
At first, I thought of the problem Skiers, where the minimum number of traversals is "the maximum antichain / minimum path cover", for which there is an O(N^4) algorithm on a DAG (and an O(N) algorithm for a directed acyclic planar graph). However, I realized afterwards that this problem is quite different, because the traversals "can't cross" each other at any vertex. So it should be something else.
Then I came up with another approach based on the observation that "each node can pick at most one outgoing edge". Each time we pick one outgoing edge at a node, the number of "chains" decreases by one. So we just count the number of nodes having at least one outgoing edge, subtract that from n, and we get the minimum number of goblins needed to overrun the town.
However, I realized not long after that this approach is wrong, since each node can also accept at most one incoming edge. Conflicts might arise if two nodes pick two edges that go into the same node.
Hmmm, wait. "Conflict" arising from "two nodes picking edges that go into the same node"? Doesn't that sound familiar? Oh yeah, it's a maxflow problem.
Then I thought of splitting the graph into layers and running maxflow between two layers at a time, which seemed too complicated, so maybe it was better to save this approach for later. Still, it made me realize the key observation of the problem: "For each edge that a goblin passes through, the number of chains decreases by one".
Suddenly, every piece of the puzzle fit together. We want to find the maximum number of edges we can pick, such that each node picks at most one outgoing edge and each node has at most one incoming edge. Doesn't this also sound familiar? YES, it's a bipartite matching problem.
One last thing to worry about: now that we can determine the minimum number of goblins needed to make the graph lose, how do we know which node to turn off (incoming/outgoing)? Looking closely at our bipartite matching graph, the "turn off" operation is the same thing as "cutting the edge from the source to an internal node" or "cutting the edge from an internal node to the sink".
Which edge should we cut in order to guarantee that the maximum flow decreases by 1 (is there even such an edge at all)? Hmm, I hadn't solved this problem before. Let's try considering a min-cut. And bravo, I then discovered that "cutting an edge of the min-cut is guaranteed to decrease the flow by one" (from the observation that "if we take out all the edges of the min-cut, then the source won't be able to reach the sink", together with "cutting one edge can decrease the flow by at most one"). And it's guaranteed that the min-cut will contain only edges of the form "source to internal node" and "internal node to sink".
That just means that every time I want to decrease the flow, I pick one edge from the min-cut (it can be any one of them) and cut it. And this change is also cumulative (the remaining edges of the min-cut will also be a min-cut of the new graph).
The problem then reduces to [find a min-cut] -> [do an O(n^3) dp and memorize the transitions] -> [use the transitions leading to the final maximum answer to determine when to cut an edge].
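To make the last step concrete, here is a hedged sketch (reading only a simplified input of the graph itself, not the full problem) that extracts exactly those min-cut edges from a maximum matching via König's theorem: a left vertex in the minimum vertex cover corresponds to cutting the source edge of a node (blocking its outgoing edges), and a right vertex corresponds to cutting its sink edge (blocking incoming edges).

#include <bits/stdc++.h>
using namespace std;

int n, m;
vector<vector<int>> g;          // g[u] = nodes reachable from node u by one edge (left copy -> right copy)
vector<int> mtL, mtR;           // matching: mtL[u] / mtR[v] = partner, or -1
vector<bool> used, visL, visR;

bool kuhn(int u) {              // standard augmenting-path search (marks right vertices)
    for (int v : g[u]) {
        if (used[v]) continue;
        used[v] = true;
        if (mtR[v] == -1 || kuhn(mtR[v])) { mtL[u] = v; mtR[v] = u; return true; }
    }
    return false;
}

void dfs(int u) {               // alternating DFS from unmatched left vertices (König's theorem)
    visL[u] = true;
    for (int v : g[u])
        if (!visR[v]) {
            visR[v] = true;
            if (mtR[v] != -1 && !visL[mtR[v]]) dfs(mtR[v]);
        }
}

int main() {
    cin >> n >> m;              // simplified input: number of nodes and directed edges
    g.assign(n, {});
    for (int i = 0; i < m; i++) {
        int u, v;
        cin >> u >> v;
        g[u - 1].push_back(v - 1);
    }
    mtL.assign(n, -1); mtR.assign(n, -1);
    for (int u = 0; u < n; u++) { used.assign(n, false); kuhn(u); }
    visL.assign(n, false); visR.assign(n, false);
    for (int u = 0; u < n; u++)
        if (mtL[u] == -1) dfs(u);
    // Minimum vertex cover = (unvisited left vertices) + (visited right vertices).
    // Removing any one of them decreases the maximum matching (= max flow) by exactly one,
    // and the rest remain a minimum cover of what is left, matching the claim above.
    for (int u = 0; u < n; u++) if (!visL[u]) cout << "block outgoing edges of node " << u + 1 << "\n";
    for (int v = 0; v < n; v++) if (visR[v])  cout << "block incoming edges of node " << v + 1 << "\n";
}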
Can anyone help me figure out what mistake I am making?
https://codeforces.me/contest/1525/submission/209717072
Figured it out. Forgot to sort the positions. :(