Idea: BledDest
Tutorial
Solution (awoo)
for _ in range(int(input())):
    n = int(input())
    s = input()
    print("YES" if list(s) == sorted(s) else "NO")
1976B - Increase/Decrease/Copy
Idea: BledDest
Tutorial
Solution (Neon)
#include <bits/stdc++.h>
using namespace std;
using li = long long;

int main() {
  int t;
  cin >> t;
  while (t--) {
    int n;
    cin >> n;
    vector<li> a(n), b(n + 1);
    for (auto& x : a) cin >> x;
    for (auto& x : b) cin >> x;
    li sum = 0, ext = 1e18;
    for (int i = 0; i < n; ++i) {
      sum += abs(a[i] - b[i]);
      // ext = cheapest additional cost of creating the extra element with value b[n]
      ext = min(ext, abs(a[i] - b[n]));
      ext = min(ext, abs(b[i] - b[n]));
      if (min(a[i], b[i]) <= b[n] && b[n] <= max(a[i], b[i]))
        ext = 0;
    }
    // +1 for the copy operation itself
    cout << sum + ext + 1 << '\n';
  }
}
1976C - Job Interview
Idea: BledDest
Tutorial
Solution (BledDest)
for _ in range(int(input())):
    n, m = map(int, input().split())
    bounds = [n, m]  # bounds[0] = programmer slots, bounds[1] = tester slots
    a = []
    a.append(list(map(int, input().split())))  # a[0] = programming skills
    a.append(list(map(int, input().split())))  # a[1] = testing skills
    bad = -1       # first candidate forced into a non-preferred role
    badType = -1   # the role that was already full at that moment
    cur = [0, 0]
    ans = 0
    types = [0 for i in range(n + m + 1)]
    # greedily assign the first n + m candidates (the last one stays unassigned)
    for i in range(n + m):
        curType = 0
        if a[0][i] < a[1][i]:
            curType = 1
        if cur[curType] == bounds[curType]:
            curType = 1 - curType
            if bad == -1:
                bad = i
                badType = 1 - curType
        types[i] = curType
        ans += a[types[i]][i]
        cur[types[i]] += 1
    res = []
    for i in range(n + m):
        # remove candidate i from the baseline assignment
        val = ans - a[types[i]][i]
        if bad != -1 and i < bad and types[i] == badType:
            # a slot of badType frees up before position bad, so candidate bad
            # switches to their preferred role and the last candidate fills the rest
            val = val + a[badType][bad] - a[1 - badType][bad] + a[1 - badType][n + m]
        else:
            # the freed slot is simply taken by the last candidate
            val = val + a[types[i]][n + m]
        res.append(val)
    res.append(ans)  # the case where the last candidate does not show up
    print(*res)
1976D - Invertible Bracket Sequences
Idea: BledDest
Tutorial
Solution (Neon)
#include <bits/stdc++.h>
using namespace std;

int main() {
  int t;
  cin >> t;
  while (t--) {
    string s;
    cin >> s;
    // cnt[x] = number of prefixes with balance x that can still act as a left border
    map<int, int> cnt;
    int b = 0;
    ++cnt[b];
    long long ans = 0;
    for (auto& c : s) {
      b += (c == '(' ? +1 : -1);
      ans += cnt[b];
      ++cnt[b];
      // balances x with 2x < the current balance can never serve as a left border again
      while (cnt.begin()->first * 2 < b)
        cnt.erase(cnt.begin());
    }
    cout << ans << '\n';
  }
}
1976E - Splittable Permutations
Idea: BledDest
Tutorial
Solution (BledDest)
#include<bits/stdc++.h>
using namespace std;

const int N = 300043;
const int MOD = 998244353;

int add(int x, int y)
{
  x += y;
  while(x >= MOD) x -= MOD;
  while(x < 0) x += MOD;
  return x;
}

int mul(int x, int y)
{
  return (x * 1ll * y) % MOD;
}

int nxt[N], prv[N];
bool exists[N];

int main()
{
  int n, q;
  scanf("%d %d", &n, &q);
  for(int i = 1; i <= n; i++) nxt[i] = prv[i] = -1;
  exists[n] = true;
  vector<int> l(q), r(q);
  for(int i = 0; i < q; i++) scanf("%d", &l[i]);
  for(int i = 0; i < q; i++) scanf("%d", &r[i]);
  // restore the left-to-right order of the values mentioned in the operations
  // using a doubly linked list
  for(int i = 0; i < q; i++)
  {
    int L = l[i], R = r[i];
    if(exists[L])
    {
      exists[R] = true;
      nxt[R] = nxt[L];
      if(nxt[R] != -1) prv[nxt[R]] = R;
      prv[R] = L;
      nxt[L] = R;
    }
    else
    {
      exists[L] = true;
      prv[L] = prv[R];
      if(prv[L] != -1) nxt[prv[L]] = L;
      nxt[L] = R;
      prv[R] = L;
    }
  }
  // seq = the known values in the order they appear in the permutation
  vector<int> seq;
  int start = -1;
  for(int i = 1; i <= n; i++)
    if(prv[i] == -1 && exists[i])
      start = i;
  seq.push_back(start);
  while(nxt[start] != -1)
  {
    start = nxt[start];
    seq.push_back(start);
  }
  vector<int> cnt_segments(n + 1);
  cnt_segments[seq[0]]++;
  cnt_segments[seq[q]]++;
  for(int i = 0; i < q; i++)
    cnt_segments[max(seq[i], seq[i + 1])]++;
  // values not mentioned in the operations are placed from largest to smallest;
  // each can go into any of the currently available slots and creates one new slot
  int ans = 1;
  int places = 0;
  for(int i = n; i >= 1; i--)
  {
    if(exists[i])
      places += cnt_segments[i];
    else
    {
      ans = mul(ans, places);
      places++;
    }
  }
  printf("%d\n", ans);
}
1976F - Remove Bridges
Idea: BledDest
Tutorial
Solution (awoo)
#include <bits/stdc++.h>

#define forn(i, n) for (int i = 0; i < int(n); i++)

using namespace std;

vector<int> h;
vector<vector<int>> g;

// h[v] = height of the subtree of v (longest downward path, in edges)
void dfs(int v, int p = -1){
  for (int u : g[v]) if (u != p){
    dfs(u, v);
    h[v] = max(h[v], h[u] + 1);
  }
}

int main() {
  cin.tie(0);
  ios::sync_with_stdio(false);
  int t;
  cin >> t;
  while (t--){
    int n;
    cin >> n;
    g.assign(n, {});
    forn(i, n - 1){
      int v, u;
      cin >> v >> u;
      --v, --u;
      g[v].push_back(u);
      g[u].push_back(v);
    }
    h.assign(n, 0);
    dfs(0);
    set<pair<int, int>> cur;
    forn(i, n) cur.insert({h[i], i});
    vector<int> ans;
    int tmp = n;
    // repeatedly peel off the longest remaining vertical path;
    // after each peeled path, tmp equals the number of still uncovered edges
    while (!cur.empty()){
      int v = cur.rbegin()->second;
      while (v != -1){
        --tmp;
        cur.erase({h[v], v});
        int pv = -1;
        for (int u : g[v]) if (h[u] == h[v] - 1){
          pv = u;
          break;
        }
        v = pv;
      }
      ans.push_back(tmp);
    }
    // with k added edges we can cover 2k - 1 paths, so print every other value
    for (int i = 0; i < int(ans.size()); i += 2){
      cout << ans[i] << ' ';
    }
    for (int i = (int(ans.size()) + 1) / 2; i < n - 1; ++i){
      cout << 0 << ' ';
    }
    cout << '\n';
  }
  return 0;
}
W contest, W editorial!
Is there any way to determine the difficulty rating of recently asked contest problems? Any guidance on this would be greatly appreciated. (Any extension?)
You can check CLIST.
Add : after https in the hyperlink.
What's the motivation behind problem C? I was blank after reading the problem. Any tips for such problems?
The O(n) solution is very tedious to implement, but applying prefix sums and binary search makes it easy. So sacrifice runtime for coding time, I guess. Here's my submission for reference: 263515621
Can you briefly explain what exactly you are running the binary search on?
Sure, I'm binary searching for the last point up to which there are fewer good programmers than n and fewer good testers than m. Here, good programmers mean candidates whose programming skill is the higher of their two skills, and good testers mean candidates whose testing skill is the higher one. Both counts (how many such candidates come before a point) are non-decreasing functions, so binary search can be applied.
Yeah got it, but now I want to know your full solution, could you explain that as well?
I still don't see how binary search can be used. I just understood some of the top submissions and implemented similarly.
The main observation of their solution is that one group always finishes selecting first. Let's say the programmers' quota is filled first. Then we send the remaining candidates to testing. In this configuration we calculate the net sum (note that we have employed one extra tester). We can then calculate the answer for every i where a tester is removed by simply subtracting b[i] from the net sum. As for removing programmers, we have to build a different configuration, this time employing n + 1 programmers, and calculate its net sum. Then, for the i's that were recruited as programmers, we subtract a[i] from the net sum of the second configuration.
I think it's pretty clear from the code, but I'll explain it anyway. Make 3 prefix sums, let's call them x, y and z. Let $$$x_{i}=x_{i-1}+max(a_{i},b_{i})$$$, $$$y_{i}=y_{i-1}+a_{i}$$$, and $$$z_{i}=z_{i-1}+b_{i}$$$. Now take the sum in $$$x$$$ up to the index we searched for before, because up until this point we can decide to which team we want to assign each candidate. From this point on, everyone will get hired either as a programmer or as a tester; use $$$y$$$ and $$$z$$$ for that. You also have to adjust your answer a bit, based on whether the candidate who didn't come is before or after the searched point. It's very similar to the solution you explained above.
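For anyone following along, here is a rough sketch of the machinery described above (my own code and names, not the commenter's submission; candidates with a[i] > b[i] are counted as preferring programming). It only builds the three prefix arrays and binary searches for the last prefix in which neither quota is exhausted; the per-candidate adjustment that finishes the solution is as described in the comment.

def build_prefixes_and_split(n, m, a, b):
    total = n + m + 1
    x = [0] * (total + 1)       # prefix sums of max(a_i, b_i)
    y = [0] * (total + 1)       # prefix sums of a_i
    z = [0] * (total + 1)       # prefix sums of b_i
    pref_p = [0] * (total + 1)  # how many of the first i candidates prefer programming
    for i in range(total):
        x[i + 1] = x[i] + max(a[i], b[i])
        y[i + 1] = y[i] + a[i]
        z[i + 1] = z[i] + b[i]
        pref_p[i + 1] = pref_p[i] + (1 if a[i] > b[i] else 0)
    # last prefix length where fewer than n candidates prefer programming and
    # fewer than m prefer testing; both counts are non-decreasing, so the
    # predicate is monotone and binary search applies
    lo, hi = 0, total
    while lo < hi:
        mid = (lo + hi + 1) // 2
        if pref_p[mid] < n and (mid - pref_p[mid]) < m:
            lo = mid
        else:
            hi = mid - 1
    return x, y, z, lo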
thanks
Can you tell me how I can fix my code? I feel my idea is similar. 263342854 The failing test case is
Basically, I am not able to correctly find, through binary search, the point after which everyone becomes a programmer or a tester.
Hi. The problem with your code is that you're searching for the last good point, inclusive. And the lowest possible value you can get is 0. But sometimes, taking the better of the 2 skills even only at index 0 will produce a bad result.
Edit: Forget the last sentence I wrote in Rev. 1, I'm tired :(
Ok,Np
Can you just explain this part of your code? I feel I just have to calculate no_more_p correctly.
https://codeforces.me/contest/1976/submission/263582707 Thanks for your solution, my code worked. I had not handled a corner case, the one where I am not able to get the lower_bound of a tester or programmer.
O(n) solution -> 263543487
I have an approach.
Instead of hiring n+m employees, hire (n+1 programmers and m testers) = set1, or hire (n programmers and m+1 testers) = set2.
Now compare both of these new assignments: you'll always notice that only one index has a change in its role; the other indexes do not change, because it's just off by one.
Build prefix sums of both set1 and set2,
and iterate over each candidate: if they are a programmer in set1, remove them from set1 (where you hired n+1 programmers) and you are left with exactly n+m employees; if they are a tester, remove them from set2 (where you hired m+1 testers) and you are again left with n+m employees.
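A rough sketch of this two-assignments idea (my own code, not the commenter's submission; it assumes each candidate's two skills differ, as in the statement, otherwise the greedy tie-break would matter). The key fact argued above is that removing a hired candidate from the run that has a spare slot for their role does not change anyone else's assignment, so the two totals are enough.

def run_greedy(a, b, prog_slots, test_slots):
    total, roles, cnt = 0, [], [0, 0]
    for x, y in zip(a, b):
        r = 0 if x > y else 1                      # preferred role: 0 = programmer, 1 = tester
        if cnt[r] == (prog_slots if r == 0 else test_slots):
            r = 1 - r                              # preferred quota is already full
        cnt[r] += 1
        roles.append(r)
        total += x if r == 0 else y
    return total, roles

def answers(n, m, a, b):
    tot1, roles1 = run_greedy(a, b, n + 1, m)      # set1: one extra programmer slot
    tot2, _ = run_greedy(a, b, n, m + 1)           # set2: one extra tester slot
    # if candidate i is a programmer in set1, drop them from set1; otherwise they
    # are a tester in both runs, so drop them from set2
    return [tot1 - a[i] if roles1[i] == 0 else tot2 - b[i] for i in range(n + m + 1)]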
Problem C is much simpler when using DP
https://codeforces.me/contest/1976/submission/263321988
We use DP to calculate (i, num_programmer, num_tester): the skill of the team starting from candidate i, given that we have already hired num_programmer programmers and num_tester testers.
Then, for each i, we calculate current_skill + dp(i+1, num_programmer, num_tester), then assign a role to candidate i and update current_skill, num_programmer and num_tester.
What about time complexity?
Time complexity is O(n)
nguyenquocthao00 That is very cool! But I thought it would give Memory Limit Exceeded, because n+m+1 can be up to 2*1e5 and you have three states in the dp?
For this problem I use a map to cache the states, not an array.
This is great. So much cleaner than the binary-search + prefix/suffix solution I implemented.
https://codeforces.me/contest/1976/submission/264856786
I'm curious why the time complexity is O(n). Why not O(n*a*b)?
Thanks
Because for each i, (i, num_programmer, num_tester) can have at most 2 different values of (num_programmer, num_tester), since the person we removed can be either a programmer or a tester.
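A runnable sketch of this memoized-suffix idea (my own code and variable names, not the commenter's submission; it assumes each candidate's two skills differ). The suffix simulation walks forward only until it reaches an already-cached state, and, as argued above, only a couple of distinct states occur per index, so the total work stays close to linear.

def solve_job_interview(n, m, a, b):
    total = n + m + 1
    cache = {}                                 # (i, p, t) -> total suffix skill from candidate i

    def pick(i, p, t):
        r = 0 if a[i] > b[i] else 1            # 0 = programmer, 1 = tester
        if (r == 0 and p == n) or (r == 1 and t == m):
            r = 1 - r                          # preferred quota already full, take the other role
        return r

    def suffix(i, p, t):
        # simulate forward until a cached state (or the end) is reached, then back-fill the cache
        path = []
        while i < total and (i, p, t) not in cache:
            r = pick(i, p, t)
            gain = a[i] if r == 0 else b[i]
            path.append(((i, p, t), gain))
            i, p, t = i + 1, p + (r == 0), t + (r == 1)
        acc = cache.get((i, p, t), 0)
        for state, gain in reversed(path):
            acc += gain
            cache[state] = acc
        return acc

    res, cur, p, t = [], 0, 0, 0
    for i in range(total):
        res.append(cur + suffix(i + 1, p, t))  # team skill if candidate i does not show up
        if i + 1 < total:                      # extend the "everyone present" prefix
            r = pick(i, p, t)
            cur += a[i] if r == 0 else b[i]
            p, t = p + (r == 0), t + (r == 1)
    return res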
For problem C, I wrote a brute-force solution 263363415, but it worked magically.
Let's divide what we need into two parts. If i is fixed:
the left part [0, i-1], which can be calculated by direct simulation;
the right part [i+1, n-1], which can also be calculated if the numbers of selected programmers pc and testers tc are known. During this process, we cache results by the tuple (i+1, pc, tc).
So the solution is very simple: if the right part is cached, just use it; if not, calculate it and save it in the cache. I don't know why, but it seems that only about 2 brute-force passes are needed.
By caching, doesn't it exceed the time complexity? Because we would need to cache (n+m+1) * n * m states. Thanks
Since it's already decided which person will go where and there isn't really a choice, there won't be many values of 'n' and 'm' provided to 'i'.
My logic is as follows:
Compare these 2 sequences.
seq_1: P T x P T i ...
seq_2: P T T x T i ...
where 'x' is the excluded index and 'i' is the index we are currently at in the recursive process. 'n1' and 'n2' are the numbers of additional programmers each sequence still needs to take; similarly, 'm1' and 'm2' are the numbers of additional testers.
Note that index 2 changed from x->T and index 3 changed from P->x. So m2 = m1-1 and n2 = n1+1, meaning the second sequence needs to take one less tester (a previously excluded person was appointed as a tester) and one more programmer (a previous programmer is now excluded). This can be worked out for all scenarios.
So n2 and m2 don't seem to vary much. My submission.
I created a blog discussing an alternate $$$O(n \log n)$$$ solution for D: Invertible Bracket Sequence. If you want to test your understanding of this concept, I also created some practice problems.
Loved C
Alternative DP solution to F:
The problem "choose the largest connected set of edges that has exactly $$$k$$$ leaves and contains the root" can be solved with DP. Let $$$\mathrm{dp}_{i, j}$$$ be the answer for the subtree rooted at $$$i$$$ when taking exactly $$$j$$$ leaves. The transitions are straightforward, the same as in a knapsack. This gives us an $$$\mathcal{O}(n^2)$$$ solution.
Notice that the sequence $$$(\mathrm{dp}_{i, 0}, \mathrm{dp}_{i, 1}, \dots)$$$ is always concave for a fixed $$$i$$$. This can easily be proven by induction: the sequence in a leaf is concave, and the max-plus convolution of two concave sequences is also concave. This gives us a faster solution. Instead of storing the DP directly, store the adjacent differences (they will always be sorted in non-increasing order). Then we can use small-to-large merging, which gives us $$$\mathcal{O}(n \log^2 n)$$$ complexity.
More about max-plus convolution
Submission
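For reference, a small self-contained illustration (my own code, not part of the submission above) of the fact used in this comment: for concave sequences, the max-plus convolution can be computed by merging the two non-increasing difference sequences, which is exactly why storing adjacent differences and merging them small-to-large works.

def max_plus_concave(a, b):
    # c[k] = max over i + j = k of a[i] + b[j], assuming a and b are concave
    da = [a[i + 1] - a[i] for i in range(len(a) - 1)]
    db = [b[i + 1] - b[i] for i in range(len(b) - 1)]
    diffs = sorted(da + db, reverse=True)   # in practice: a merge of two sorted lists
    c = [a[0] + b[0]]
    for d in diffs:
        c.append(c[-1] + d)
    return c

def max_plus_naive(a, b):
    return [max(a[i] + b[k - i] for i in range(max(0, k - len(b) + 1), min(k, len(a) - 1) + 1))
            for k in range(len(a) + len(b) - 1)]

assert max_plus_concave([0, 5, 8, 9], [0, 4, 6]) == max_plus_naive([0, 5, 8, 9], [0, 4, 6])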
I couldn't figure out how to implement the range condition balance[i] <= 2 * balance[l - 1]; later I saw that some people used data structures to check whether the maximum of the range exceeds 2 * balance[l - 1].
But I must say that the simple map implementation is the best I have seen in two days. How did you even come up with that?
Nice contest! Thanks for this nice editorial.
Can someone explain why the time complexity of the solution for problem D is O(n log n)? Specifically, how does the while loop take only log n time? I can visualize it somehow but I can't prove it.
The while loop isn't taking log(n) time. Rather, the number of items removed by the while loop, summed over all i, is at most n; the log(n) comes from the map operations on the bal[i] values, since it's an ordered map and we want to remove the smallest few balances that are blocked as matches for potential r's.
Can anyone tell me why my O(n) solution fails for problem C?
263511839
UPD: I compressed my code further and I'm surprised to find that I now have the shortest correct solution for the problem.
bro explain
Same, I thought of that but couldn't figure out how to count it.
Hi, can you please explain the significance of this:
if(potential & 1ll) bucket[potential/2] = 0;
The "potential" is $$$bal_i$$$ in the official editorial. Consider the graph of $$$bal_i$$$ versus $$$i$$$. When we flip a segment of parentheses ending at index $$$i$$$, we find a previous index $$$k$$$ with the same balance $$$bal_i$$$ and flip the graph between them across the line $$$y = bal_i$$$. The flipped $$$bal$$$ must be no smaller than zero, so if $$$\exists j, k<j<i$$$ s.t. $$$bal_j > 2bal_i$$$, the graph flips down below zero, all the $$$k$$$s before $$$j$$$ don't count, and the bucket is reset.
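If it helps, here is a tiny brute-force check of that reflection condition (purely illustrative, my own helper names; s is assumed to be a regular bracket sequence, as in the problem): inverting s[l..r] maps the balance at every position j inside the segment to 2*bal[l-1] - bal[j], so the result is again regular exactly when bal[r] = bal[l-1] and bal[j] <= 2*bal[l-1] for every j in the segment.

def balances(s):
    b, out = 0, [0]
    for c in s:
        b += 1 if c == '(' else -1
        out.append(b)
    return out

def is_regular(s):
    b = 0
    for c in s:
        b += 1 if c == '(' else -1
        if b < 0:
            return False
    return b == 0

def check(s):
    bal = balances(s)
    for l in range(1, len(s) + 1):
        for r in range(l, len(s) + 1):
            inverted = (s[:l - 1]
                        + ''.join('(' if c == ')' else ')' for c in s[l - 1:r])
                        + s[r:])
            ok = bal[r] == bal[l - 1] and max(bal[l - 1:r + 1]) <= 2 * bal[l - 1]
            assert is_regular(inverted) == ok

check("(())()")   # no assertion fires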
In problem F, how do we prove that the greedy is correct?
I will use the power of induction, going over the number of vertices in the tree.
The base is immediately obvious for n = 1 or n = 2.
Consider a vertex v != root. If we choose some set of leaf nodes L = {l1, l2, ..., lk} in its subtree, how many bridges do we remove? This number equals the size of the union of the edges of all paths from the li to v, plus some number C which does not depend on the leaves in L.
Actually, C depends only on the other chosen leaf nodes (the ones not in L). It can be shown that C equals the number of bridges remaining on the path from v to the root after we choose all the leaves that do not belong to L.
This means we can choose some leaf nodes outside of v's subtree, and then optimize the leaves in L independently from the others, as long as |L| = const (their number remains constant). By the inductive hypothesis for v's subtree, greedy is the optimal way.
Suppose we have some solution which is not greedy. Let's show that there is a vertex v whose subtree is solved non-greedily (and we can make it better). Just take 2 leaves: l1, which would be taken by the greedy solution, and l2, which was taken instead of l1. Choose v = LCA(l1, l2).
P.S. Maybe there is some inaccuracy in my proof, such as the deg(root) = 1 condition in the problem, but the induction does not use it. We need this condition only to conclude that we should connect the root with 2k-1 leaves, so there is no problem.
What if the LCA is the root?
In the induction, the root does not necessarily have only 1 son; that's fine, because you prove the induction hypothesis not just for that tree, but for any tree with n nodes.
Why does this code give TLE? As far as I know it is clearly O(n log n).
263649473
nvm found it
For Problem F, instead of the implementation idea mentioned in the editorial, if we simply brute force (i.e. when we take the longest path, simply update the distances of all leaves whose edge numbers decrease), what would be the complexity? I am struggling to find a construction that makes the number of updates worse than O(N^1.5)
In the D tutorial, can someone explain these lines? I couldn't get them:
"Luckily, it is easy to fix: if the current balance is $$$bal_r$$$ and there is a balance $$$x$$$ in the map such that $$$x < bal_r - x$$$, we can remove all its occurrences from the map (i.e. remove the key $$$x$$$ from the map). This fix works because the current position lies between the left border (which we store in the map) and the potential right border, and the current balance is too high (which violates the first condition)."
Amazing E problem, thank you BledDest !
Just got this e-mail that plagiarism has been detected in my code and I am genuinely so confused. My solution for problem D is only slightly similar to one random guy's solution!? This contest was a huge rating boost for me. Please review this: 263359271 This is my submission and the following is the other guy's submission: 263354669 MikeMirzayanov
best content
I think in the editorial of B, in the first point, we should increase a[i] to b[n+1] instead of a[n+1] to b[n+1], because a's size is only n.
Can anyone point out my mistake in this solution for C? I am not able to figure out where I am going wrong. I have implemented it in exactly the same way as given in the editorial.
Submission Link: https://codeforces.me/contest/1976/submission/283706262
Can someone tell me why this is wrong?
~~~~~~~~~~
void solve(){
}
~~~~~~~~~~~~
I felt that the subintervals intuitively pushed for DP and prefixes on D; here's a DP solution for that.