Idea: BledDest, preparation: BledDest
Solution (BledDest)
t = int(input())
for i in range(t):
    n = int(input())
    if n % 7 == 0:
        print(n)
    else:
        ans = -1
        for j in range(10):
            if (n - n % 10 + j) % 7 == 0:
                ans = n - n % 10 + j
        print(ans)
Idea: BledDest, preparation: awoo and Neon
Solution (awoo)
for _ in range(int(input())):
    s = input()
    print(min((len(s) - 1) // 2, s.count('0'), s.count('1')))
Idea: BledDest, preparation: Neon
Solution (awoo)
for _ in range(int(input())):
    hc, dc = map(int, input().split())
    hm, dm = map(int, input().split())
    k, w, a = map(int, input().split())
    for i in range(k + 1):
        nhc = hc + i * a
        ndc = dc + (k - i) * w
        if (hm + ndc - 1) // ndc <= (nhc + dm - 1) // dm:
            print("YES")
            break
    else:
        print("NO")
Idea: BledDest, preparation: Neon
Solution (Neon)
#include <bits/stdc++.h>

using namespace std;

const int N = 1001;

int main() {
    // d[i] = minimum number of operations to obtain i starting from 1
    vector<int> d(N, N);
    d[1] = 0;
    for (int i = 1; i < N; ++i) {
        for (int x = 1; x <= i; ++x) {
            int j = i + i / x;
            if (j < N) d[j] = min(d[j], d[i] + 1);
        }
    }
    int t;
    cin >> t;
    while (t--) {
        int n, k;
        cin >> n >> k;
        vector<int> b(n), c(n);
        for (int &x : b) cin >> x;
        for (int &x : c) cin >> x;
        // no more than sum(d[b_i]) operations can ever be useful
        int sum = 0;
        for (int x : b) sum += d[x];
        k = min(k, sum);
        // 0/1 knapsack: dp[j] = max total cost using at most j operations
        vector<int> dp(k + 1, 0);
        for (int i = 0; i < n; ++i) {
            for (int j = k - d[b[i]]; j >= 0; j--) {
                dp[j + d[b[i]]] = max(dp[j + d[b[i]]], dp[j] + c[i]);
            }
        }
        cout << *max_element(dp.begin(), dp.end()) << '\n';
    }
}
Idea: BledDest, preparation: awoo
Solution (awoo)
#include <bits/stdc++.h>

#define forn(i, n) for (int i = 0; i < int(n); i++)

using namespace std;

struct edge{
    int v, u, w;
};

vector<int> pr, rk;

// DSU with path compression
int getp(int a){
    return a == pr[a] ? a : pr[a] = getp(pr[a]);
}

bool unite(int a, int b){
    a = getp(a), b = getp(b);
    if (a == b) return false;
    if (rk[a] < rk[b]) swap(a, b);
    rk[a] += rk[b];
    pr[b] = a;
    return true;
}

int main() {
    int n, m;
    scanf("%d%d", &n, &m);
    pr.resize(n);
    rk.resize(n);
    vector<edge> es(m);
    forn(i, m){
        scanf("%d%d%d", &es[i].v, &es[i].u, &es[i].w);
        --es[i].v, --es[i].u;
        es[i].w *= 2; // double the weights so that the breakpoints (w_i + w_j) / 2 stay integer
    }
    // candidate breakpoints: the MST can only change at x = (w_i + w_j) / 2
    // (j == i is included, so every w_i itself is a breakpoint too)
    vector<int> ev(1, 0);
    forn(i, m) forn(j, i + 1) ev.push_back((es[i].w + es[j].w) / 2);
    sort(ev.begin(), ev.end());
    ev.resize(unique(ev.begin(), ev.end()) - ev.begin());
    vector<long long> base; // MST cost at the start of each range
    vector<int> cnt;        // number of MST edges heavier than the range start
    for (int x : ev){
        sort(es.begin(), es.end(), [&x](const edge &a, const edge &b){
            int wa = abs(a.w - x);
            int wb = abs(b.w - x);
            if (wa != wb) return wa < wb;
            return a.w > b.w; // on ties, prefer the heavier edge
        });
        forn(i, n) pr[i] = i, rk[i] = 1;
        long long cur_base = 0;
        int cur_cnt = 0;
        for (const auto &e : es) if (unite(e.v, e.u)){
            cur_base += abs(e.w - x);
            cur_cnt += x < e.w;
        }
        base.push_back(cur_base);
        cnt.push_back(cur_cnt);
    }
    int p, k, a, b, c;
    scanf("%d%d%d%d%d", &p, &k, &a, &b, &c);
    int x = 0;
    long long ans = 0;
    forn(q, k){
        if (q < p) scanf("%d", &x);
        else x = (x * 1ll * a + b) % c;
        int y = upper_bound(ev.begin(), ev.end(), 2 * x) - ev.begin() - 1;
        // moving from ev[y] to 2x, each lighter MST edge gains and each heavier one
        // loses (2x - ev[y]); divide by 2 to undo the initial doubling of the weights
        ans ^= (base[y] + (n - 1 - 2 * cnt[y]) * 1ll * (2 * x - ev[y])) / 2;
    }
    printf("%lld\n", ans);
    return 0;
}
Idea: BledDest, preparation: BledDest
Solution (BledDest)
#include <bits/stdc++.h>

using namespace std;

typedef pair<long long, int> val;
#define x first
#define y second

const int N = 200043;

val operator +(const val& a, const val& b)
{
    return make_pair(a.x + b.x, a.y + b.y);
}

val operator -(const val& a, const val& b)
{
    return make_pair(a.x - b.x, a.y - b.y);
}

// segment tree with a lazy "flip" that toggles whether each element in a range
// is counted; T is the sum over the currently counted elements, Tfull over all
val T[4 * N];
val Tfull[4 * N];
int f[4 * N];

val getVal(int v)
{
    if(f[v] == 1)
        return Tfull[v] - T[v];
    return T[v];
}

void push(int v)
{
    if(f[v] == 1)
    {
        f[v] = 0;
        T[v] = Tfull[v] - T[v];
        f[v * 2 + 1] ^= 1;
        f[v * 2 + 2] ^= 1;
    }
}

void upd(int v)
{
    Tfull[v] = Tfull[v * 2 + 1] + Tfull[v * 2 + 2];
    T[v] = getVal(v * 2 + 1) + getVal(v * 2 + 2);
}

void setVal(int v, int l, int r, int pos, val cur)
{
    if(l == r - 1)
    {
        f[v] = 0;
        Tfull[v] = cur;
        T[v] = cur;
    }
    else
    {
        push(v);
        int m = (l + r) / 2;
        if(pos < m)
            setVal(v * 2 + 1, l, m, pos, cur);
        else
            setVal(v * 2 + 2, m, r, pos, cur);
        upd(v);
    }
}

void flipColor(int v, int l, int r, int L, int R)
{
    if(L >= R) return;
    if(l == L && r == R)
        f[v] ^= 1;
    else
    {
        push(v);
        int m = (l + r) / 2;
        flipColor(v * 2 + 1, l, m, L, min(m, R));
        flipColor(v * 2 + 2, m, r, max(L, m), R);
        upd(v);
    }
}

val getVal(int v, int l, int r, int L, int R)
{
    if(L >= R) return make_pair(0ll, 0);
    if(l == L && r == R) return getVal(v);
    int m = (l + r) / 2;
    val ans = make_pair(0ll, 0);
    push(v);
    ans = ans + getVal(v * 2 + 1, l, m, L, min(R, m));
    ans = ans + getVal(v * 2 + 2, m, r, max(L, m), R);
    upd(v);
    return ans;
}

int n;
vector<int> g[N];
int p[N], siz[N], d[N], nxt[N];
int tin[N], timer;
map<pair<int, int>, int> idx;
long long sum;
int cnt;
int active[N];
int active_cnt;

// heavy-light decomposition; the heavy child is moved to g[v][0]
void dfs_sz(int v)
{
    if (p[v] != -1)
    {
        auto it = find(g[v].begin(), g[v].end(), p[v]);
        if (it != g[v].end())
            g[v].erase(it);
    }
    siz[v] = 1;
    for (int &u : g[v])
    {
        p[u] = v;
        d[u] = d[v] + 1;
        dfs_sz(u);
        siz[v] += siz[u];
        if (siz[u] > siz[g[v][0]])
            swap(u, g[v][0]);
    }
}

void dfs_hld(int v)
{
    tin[v] = timer++;
    for (int u : g[v])
    {
        nxt[u] = (u == g[v][0] ? nxt[v] : u);
        dfs_hld(u);
    }
}

// [l; r] inclusive
void flipSegment(int l, int r)
{
    flipColor(0, 0, n, l, r + 1);
}

// [l; r] inclusive
val get(int l, int r)
{
    return getVal(0, 0, n, l, r + 1);
}

void flipPath(int v, int u)
{
    for (; nxt[v] != nxt[u]; u = p[nxt[u]])
    {
        if (d[nxt[v]] > d[nxt[u]]) swap(v, u);
        flipSegment(tin[nxt[u]], tin[u]);
    }
    if (d[v] > d[u]) swap(v, u);
    flipSegment(tin[v], tin[u]);
}

val getPath(int v, int u)
{
    val res = make_pair(0ll, 0);
    for (; nxt[v] != nxt[u]; u = p[nxt[u]])
    {
        if (d[nxt[v]] > d[nxt[u]]) swap(v, u);
        // update res with the result of get()
        res = res + get(tin[nxt[u]], tin[u]);
    }
    if (d[v] > d[u]) swap(v, u);
    res = res + get(tin[v], tin[u]);
    return res;
}

// activate vertex x: flip the path from the root to its parent
// (adjusting the global sum/cnt) and insert the edge (x, p[x])
void activate_vertex(int x)
{
    int id = 0;
    if(p[x] != -1)
    {
        id = idx[make_pair(x, p[x])];
        val v = getPath(0, p[x]);
        flipPath(0, p[x]);
        sum -= v.x;
        cnt -= v.y;
        v = getPath(0, p[x]);
        sum += v.x;
        cnt += v.y;
    }
    cnt++;
    sum += id;
    setVal(0, 0, n, tin[x], make_pair((long long)(id), 1));
    active[x] = 1;
    active_cnt++;
}

void init_hld(int root = 0)
{
    d[root] = 0;
    nxt[root] = root;
    p[root] = -1;
    timer = 0;
    dfs_sz(root);
    dfs_hld(root);
}

int currentSize[N];

// an edge belongs to the answer iff the number of active vertices below it is odd
int dfsSolution(int x, vector<int>& edges)
{
    if(!active[x]) return 0;
    currentSize[x] = 1;
    for(auto y : g[x])
        if(y != p[x])
            currentSize[x] += dfsSolution(y, edges);
    if(currentSize[x] % 2 == 1)
        edges.push_back(idx[make_pair(x, p[x])]);
    return currentSize[x];
}

void outputSolution()
{
    vector<int> edges;
    if(cnt * 2 == active_cnt)
    {
        dfsSolution(0, edges);
        sort(edges.begin(), edges.end());
    }
    printf("%d", int(edges.size()));
    for(auto x : edges) printf(" %d", x);
    puts("");
    fflush(stdout);
}

void processQuery(int v)
{
    activate_vertex(v);
    if(cnt * 2 == active_cnt)
        printf("%lld\n", sum);
    else
        puts("0");
    fflush(stdout);
}

int main()
{
    scanf("%d", &n);
    for(int i = 1; i < n; i++)
    {
        int x, y;
        scanf("%d %d", &x, &y);
        --x;
        --y;
        g[x].push_back(y);
        g[y].push_back(x);
        idx[make_pair(x, y)] = i;
        idx[make_pair(y, x)] = i;
    }
    init_hld();
    activate_vertex(0);
    while(true)
    {
        int t;
        scanf("%d", &t);
        if(t == 3)
            break;
        if(t == 2)
            outputSolution();
        if(t == 1)
        {
            int v;
            scanf("%d", &v);
            --v;
            processQuery(v);
        }
    }
}
Video editorial for Problem D: VIDEO LINK
Video editorial for Problem C: VIDEO LINK
The blend of BFS with knapsack makes problem D very beautiful if we see it from the perspective of a DSA-based interview. An ideal problem to test your graph and DP skills and to practice reducing a problem to a known, simpler one.
Can you please elaborate how to use BFS to calculate the number of steps?
Since the constraints on b_i are not so big, we do a brute-force kind of thing to create the adjacency list.
For each i in the range [1, 1000] I can calculate where I can go from i. For example, suppose i = 5; then
I can go to 10 as 5 + (5/1), here x is 1
I can go to 7 as 5 + (5/2), here x is 2
I can go to 6 as 5 + (5/3), here x is 3
and so on...
Here is a small observation: if we choose x > 5, we will not reach anywhere and will unnecessarily waste one operation, so we choose x < i. After that it is simple: we create an adjacency list and a distance array and simply do a BFS from 1, calculating the distances. Here is my solution; look at the code below the fast I/O setup and above the while loop in main.
In my code, I created an adjacency list where adj[i] is a set that contains all the destinations I can reach from i. I used a set because many destinations are repeated. In the creation of the adjacency list I used two for loops: the outer loop fixes i and the inner loop goes over all x. After the creation of the adjacency list, I did a BFS with node 1 as the source and calculated the distance of each node from it.
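For concreteness, here is a minimal sketch of the BFS described above (dist and N are illustrative names, not taken from the commenter's actual submission):

from collections import deque

N = 1001
dist = [-1] * N  # dist[i] = minimum number of operations to reach i from 1
dist[1] = 0
q = deque([1])
while q:
    v = q.popleft()
    for x in range(1, v + 1):  # x > v only adds floor(v/x) = 0, a wasted move
        u = v + v // x
        if u < N and dist[u] == -1:
            dist[u] = dist[v] + 1
            q.append(u)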
I tried calculating the number of steps with DFS instead of BFS, but I am getting a wrong answer. Can you please tell me why DFS will not work here?
My submission for your reference: Click Here
I may not be able to give you a concrete answer to your question, because whenever there is a question of finding the shortest path or minimum distance in an unweighted graph, the only algorithm that pops into my mind is BFS. I have never applied DFS here, or rather never thought of it, but I think you can apply DFS; it depends on how you apply it, and you have to do a little tweak in it.
The difference between DFS and BFS is the order in which you visit your neighbours. In BFS you first visit the neighbouring nodes that are closest to your source node.
In DFS you visit the neighbours in the order you have stored them in the adjacency list or adjacency matrix.
Let's take an example not related to the actual problem; take the graph below:
1 <-> 2 <-> 3
1 <-> 3
If you do the conventional DFS and start from 1, the distance to 3 will be 2 if you follow the first path, but only 1 via the second path; and the second path will never be explored, because 3 is already visited. So the tweak you can do is: as you return after processing all the nodes, mark them unvisited again. Say you started from 1, visited 2 and calculated its distance, which is 1; then you visited 3 and calculated its distance, which is 2. Now, as you return from 3, you mark it unvisited, because next time, when you take the second path, 3 will be unvisited and you can relax its distance from 2 to 1. As you can see, this tweak is a little cumbersome and BFS is more intuitive; that is why you should go for BFS. But you can apply DFS if you tweak it.
Thanks for this wonderful explanation. As you said, to make the nodes unvisited after processing so as to relax the distances, I am doing the same thing in the dfs function of my solution.
Following is the snippet in the DFS function which I am referring to:
(Basically, through DFS the number of steps for some n is calculated incorrectly, while through BFS it is calculated correctly, and I am not able to figure out the reason.)
Yeah, I noticed. I am also not able to understand why this is not working, sorry :(
When cnt < steps[n], you should not only update the vertex n, you should update all vertices that are reachable from the vertex n. That is the reason why DFS is never used (except on trees) to calculate shortest paths. You can actually try "fixing" your DFS by returning if and only if cnt >= steps[n], but with this implementation the DFS works in O(n! * n), as far as I remember. This DFS can be useful if n < 15 and you need to work with all possible paths. So, in this problem BFS is essential.
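For illustration, a self-contained sketch of that "fixed" DFS (hypothetical names, not the commenter's actual code). It computes correct distances because it keeps recursing whenever a distance improves, but it may revisit a vertex many times, which is exactly the blow-up described above:

import sys
sys.setrecursionlimit(10000)

N = 1001
steps = [N] * N  # N acts as infinity
steps[1] = 0

def dfs(v):
    for x in range(1, v + 1):
        u = v + v // x
        if u < N and steps[v] + 1 < steps[u]:  # recurse only on improvement
            steps[u] = steps[v] + 1
            dfs(u)

dfs(1)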
DFS is not guaranteed to find the shortest path.
In problem E there are only 2*m different spanning trees. For each edge there is only one interval in which it is useful.
Can you please elaborate a bit more on how to find these 2*m breaking points?
No test (including hacks) has more than $$$m$$$ good MSTs; I'm inclined to think that is the true limit, since I can't find a counterexample.
I used this to solve E in a way that works in $$$O(X m \log(m)\log(10^8) + k(n + \log(m)))$$$, where $$$X$$$ is the number of MSTs which are optimal for some $$$x$$$.
So if we have calculated $$$x_1, x_2, \ldots, x_{i - 1}$$$, we know that the next range with the same MST must go from $$$x_{i - 1}$$$ to some $$$x_i$$$. So we can just binary search for this value $$$x_i$$$. In each step of the binary search we have to check whether the MST is the same. We can trivially do this in $$$O(m \log m)$$$, so we get a total of $$$O(m^2 \log(m)\log(10^8))$$$.
Now for a query we can just binary search to find the correct MST and compute the absolute value in $$$O(n + \log(m))$$$ time. You can store prefix sums in advance to be able to do this in $$$O(\log(n) + \log(m))$$$, but it isn't necessary to get AC.
So the total complexity is $$$O(X m \log(m)\log(10^8) + k(n + \log(m)))$$$ or $$$O(X m \log(m)\log(10^8) + k(\log(n) + \log(m)))$$$, depending on the implementation.
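As an illustration of the prefix-sum trick, here is a hedged sketch of the query side (xs, ws and pref are assumed to be precomputed per range as described; this is not code from the linked submission):

import bisect

# assumed precomputed:
#   xs[i]   - start of the i-th range, sorted
#   ws[i]   - sorted weights of the MST edges used on that range
#   pref[i] - prefix sums of ws[i], so pref[i][j] = sum of the first j weights

def query(q, xs, ws, pref):
    i = bisect.bisect_right(xs, q) - 1         # range containing q
    w, p = ws[i], pref[i]
    j = bisect.bisect_left(w, q)               # edges with weight < q
    below = j * q - p[j]                       # sum of (q - weight) over them
    above = (p[-1] - p[j]) - (len(w) - j) * q  # sum of (weight - q) over the rest
    return below + above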
Code: 144725137
Thanks! One small point, should the first term be $$$O(m^2\log m \log M)$$$, where $$$M = 10^8$$$?
Here is my python code with complexity $$$O(m^2\log m \log M + k \log m)$$$ https://codeforces.me/contest/1633/submission/144881605
Yup, it should be, fixed it now.
Here is my proof of why for each edge there is only one interval.
Let's prove that, restricted to q > w (where w is the weight of our edge), the edge is selected on a single range [l, r]. Now, the key insight: you can determine whether an edge is selected in the MST another way: join every edge (literally every edge, so the result is not a tree anymore) whose cost |weight - q| is smaller than our edge's, and count the components. The edge will be selected in the MST if and only if the number of components decreases when it is added.
Now, consider what we have to join for some q. We have to join every edge with weight from w to q + (q - w), because only those weights satisfy |weight - q| < |w - q|. When we increase q, the left side of this subrange stays fixed and only the right side grows. So we only ever add new edges into our "join everything" graph. Hence the edge with weight w is selected as long as it decreases the number of components; it stops being selected once the vertices it connects end up in the same component, and it stays unselected from then on. The case q < w is completely symmetrical.
Looks legit.
So far we have: for q > w the edge is used on a single range, and for q < w the edge is used on a single range. But it's easy to see that for q = w we always use the edge with weight w, so the two ranges merge into one. Possible loophole: what if several edges have the same weight and the same endpoints? You can't use all of them, you'll use only one of them. But I think it can be proven if done more carefully.
It was a very good contest, but the weak test cases in C were a bad point.
Out of curiosity, what was the purpose of having both explicit and compressed queries in problem E? Is there some kind of solution that this was meant to cut off? Right now, it seems to me like the problem would be the same with only the compressed queries.
It could be to:
decrease the time spent on input and output
reduce the size of the input file
include more queries for thorough checking
Problem C can be solved in O(1) time as follows:
Let's assume that Monocarp spends $$$x$$$ coins on upgrading their armor and $$$y$$$ coins on upgrading their attack power. The total number of turns required to defeat the monster would be $$$\left\lceil \frac{h_M}{d_C + wy} \right\rceil$$$ and the maximum number of turns Monocarp can survive is $$$\left\lfloor \frac{h_C + ax - 1}{d_M} \right\rfloor + 1$$$. Thus, we have:
$$$\left\lceil \frac{h_M}{d_C + wy} \right\rceil \le \left\lfloor \frac{h_C + ax - 1}{d_M} \right\rfloor + 1$$$
Note the +1 there, since Monocarp is the one who attacks first. Obviously, we also have:
$$$x + y = k$$$
because if Monocarp can defeat the monster by using only some of their coins, they will be able to do the same while using all of their coins. Substituting the second equation into the first, we get a quadratic inequality, which can be solved to get two roots (the roots may be equal). Let's call the two roots $$$m$$$ and $$$n$$$. If there exists an integer $$$y$$$ with $$$0 \le y \le k$$$ in the range $$$(m, n)$$$, then Monocarp can defeat the monster and the answer is YES; otherwise the answer is NO.
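A hedged sketch of this idea (my own illustration, not the commenter's code, assuming w, a >= 1 so the inequality really is quadratic): the relaxed real-valued quadratic locates candidate values of y, and each candidate is then verified with the exact ceiling test. In Python, integers are unbounded, so the overflow issue mentioned below does not arise:

import math

def can_win(hc, dc, hm, dm, k, w, a):
    # exact win test for a fixed split: y coins on damage, x = k - y on armor
    def ok(y):
        ndc, nhc = dc + w * y, hc + a * (k - y)
        return (hm + ndc - 1) // ndc <= (nhc + dm - 1) // dm

    # relaxed quadratic A*y^2 + B*y + C >= 0 (ceilings dropped), obtained from
    # hm / (dc + w*y) <= (hc + a*(k - y)) / dm + 1
    H = hc + a * k + dm
    A, B, C = -w * a, w * H - a * dc, dc * H - hm * dm
    disc = B * B - 4 * A * C
    if disc < 0:
        return False
    r1 = (-B + math.sqrt(disc)) / (2 * A)  # A < 0, so r1 <= r2
    r2 = (-B - math.sqrt(disc)) / (2 * A)
    # exactly verify a few integer candidates near the real interval
    cands = {min(k, max(0, int(round(v)) + d))
             for v in (r1, r2, (r1 + r2) / 2) for d in (-1, 0, 1)}
    return any(ok(y) for y in cands)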
When you substitute x = k - y and solve the first equation, there will be terms such as hc*dc and many more, which will cause overflow.
For the editorial of problem E, I don't quite understand the last 4 paragraphs; here are my questions:
Why isn't knowing the weight of the MST at the start of the range enough?
You mentioned (in the last 3 paragraphs) that we want to add more ranges. What does that mean?
Thanks. Cool problem btw
Hmm I see. But how does it translate to
At the implementation part?
Sorry, I shouldn't have said that the cost doesn't change. I should have said that the number of edges whose weight is greater than w doesn't change.
I see. Thanks for your explanation. I think ExplodingFreeze's solution is more intuitive for me than the editorial one. The idea of binary searching the MST is insightful and very cool somehow :)
In problem D, do we really need DP to calculate the minimum number of operations to get the number i from 1? Can we always take x = 1 and, at the end, take x according to the remaining value? Would that not be optimal?
It is not necessarily possible to choose the remaining value. Consider $$$a_i = 12$$$.
Let's list the values of $$$\lfloor\frac{a_i}{j}\rfloor$$$ for all $$$1 \leq j \leq a_i$$$: $$$[12, 6, 4, 3, 2, 2, 1, 1, 1, 1, 1, 1]$$$.
Note that we can generate all values from $$$1$$$ to $$$\sqrt{a_i}$$$, but for values larger than that we have $$$a_i - \sqrt{a_i}$$$ values remaining but only $$$\sqrt{a_i}$$$ possible divisors, so the majority are unreachable.
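A quick throwaway check of this for a_i = 12 (not part of any solution):

a = 12
print(sorted({a + a // j for j in range(1, a + 1)}))
# [13, 14, 15, 16, 18, 24] - only 6 of the 12 values in 13..24 are reachable in one step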
<Ignore what was written here in previous revisions if you saw it. I somehow forgot we need the costs lol>
Got it , Thanks !
Problem D: how do we calculate the minimum number of operations to get the number i from 1, using BFS or DP?
You can use both of them. For BFS, loop over every number x from 1 upwards and add an edge from x to x + x/i for each i; then run BFS from 1.
Task B is easier than task A :)
True, I spent more time on A than on B and C combined.
In problem D, why is the maximum number of operations to convert 1 into b equal to 12 and not 10? If we fix x as 1 and keep iterating, we would reach 1024 in 10 operations, which exceeds the maximum value of b. How can you make sure that there is no case where the number of operations exceeds 12?
No, we need at most 12 operations (try a number which is not a power of 2). Just write BFS or DP code for calculating the number of operations and then take the max; you will get 12.
OmPrakashIIT Can we calculate it mathematically?
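Not a mathematical proof, but the bound can at least be verified with a few lines reusing the editorial's DP:

N = 1001
d = [N] * N  # d[i] = minimum operations to reach i from 1; N acts as infinity
d[1] = 0
for i in range(1, N):
    for x in range(1, i + 1):
        j = i + i // x
        if j < N:
            d[j] = min(d[j], d[i] + 1)
print(max(d[1:]))  # prints 12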
In case someone is interested, I have made video solutions for problems A-D in Chinese (video BV1pq4y1h72x).
It is hard for me to understand paragraph 4 of E's editorial: why can m^2 swaps lead to m^2 different arrangements? Could someone share their idea with me?
Can anyone please help with why I am getting a runtime error in problem E? https://codeforces.me/contest/1633/submission/144915859
Failing test case
Thanks a lot for your help. But even after fixing this test case, I am still getting a runtime error: 144946651.
Maybe there's another solution for problem E (sorry for my awful English). First I sort all the edges and queries (it takes O(q log q) time, but it is fast enough...).
I use "WQS binary search" to solve a famous problem ("if every edge has a color (B/W), find an MST that has exactly x white edges") (m+1)*n times. And then I get an O(nm^2 log w + q log q + nq) solution: https://codeforces.me/contest/1633/submission/144789682
Moreover, I note that we can use some geometry trick to make it faster (or just use a Li Chao segment tree to turn O(nq) into O(q log n), but n is only 50...).
Finally I got an O(nm^2 log w + n^2 m + q log q) solution: https://codeforces.me/contest/1633/submission/144802434 , where w is the maximum edge weight (10^8 in this problem). It runs fast enough, and I think we could set m = 1225 (= 50*49/2) and this solution would still beat the standard one (which is m^3 log m), maybe?
Problem F's tests are too weak and the time limit is too large, so many solutions with higher complexity can pass. BTW, I once built a wrong heavy-light decomposition on it and got Accepted...
I really don't understand the solution of problem F. Maybe because I'm a specialist. Can someone elaborate on it?
You need to learn heavy-light decomposition or link/cut trees, but I guess you haven't learned those yet; problem F is not an easy thing for beginners.
Very good editorial and problems, as usual. Thanks.
In problem E, while following the tutorial, ensure that when sorting edges by their $$$|w_i-x|$$$ value, in case of a tie between two edges you always pick the heavier edge first. The reason is this: we calculate the MST at the start of each range $$$[a, b)$$$. For any $$$x \ge a, x \lt b$$$, we use the MST cost at $$$a$$$ and decrease it by (number of edges whose weight is $$$> a$$$) $$$(x - a)$$$ times. This decrease (or "number of edges") is maximized when we pick the heaviest possible edges to form our MST.
Can anyone explain why, in problem D, the maximum weight is limited to 12*n? I changed the max weight in my knapsack from k to 12*n and got accepted, but I am not sure why.
Because the maximum number of steps required to convert 1 to any of the b[i] is 12. So the upper bound is 12 * n.
Oh, I actually forgot about the constraints, thank you. I could have used some other, bigger multiple as well; 12 is just a tight bound. Got it, thanks!
You don't actually need to know the upper bound; you could have guessed wrong. You can just compute the maximum possible cost (the cost of upgrading every single array element, which is the sum of all the costs). If it's smaller than $$$k$$$, then there is no need for the knapsack.
Can anyone share their solution to F using a link/cut tree? Thanks in advance.
I have a doubt about the "Make Them Equal" problem.
Test case:
Output: 9
Here 7 can be made in 3 moves (1 + floor(1/1) + floor(2/1) + floor(3/1)), and for the 2 we use another 1 move. This strategy gives us a maximum knapsack value of 10, i.e. it allows us to choose 2, 6, 2 as the weights within the 4 allowed moves. Then why is the output 9???
You Noob
You have some misunderstanding of this problem. The correct process is:
1 + floor(1/1) = 2
2 + floor(2/2) = 3
3 + floor(3/1) = 6
6 + floor(6/6) = 7
So 7 is made in 4 moves, not 3.
Can anyone tell me how to count the prefixes and optimize the algorithm to O(k log m) in the second half of the editorial for problem E? I see that in the solution each number is simply classified as increasing or decreasing, but what if a number happens to do both within an interval (that is, it first decreases and then increases there)? Another question: why is this complexity not O(k log(m^2))? After all, there are m^2 such intervals. (I'm very sorry, my English level is not high.)
From these intervals, if you check their MST edges, you will see that most of them are the same. In reality there are at most m different MSTs (I don't have a proof for that, but I couldn't find any counterexamples). So I did a binary search to find them. Then I added the edges to solve the problem you mention, getting 2m points of change.
Thank you! But, emm... I am sorry to say that I couldn't understand what you said. But I found the answer to my problem in the end: I hadn't noticed that the standard code adds an interval boundary at each w_i; if you do that, within every interval each edge's cost only increases or only decreases. There is a huge difference between my mother tongue and English; I don't know if you can understand my point.
Sorry, English isn't my mother tongue either, and yesterday I was trying to be brief because I was writing from my phone. What we try to do here is:
- 1. Find all spots/intervals where the MST (minimum spanning tree) changes.
- 2. Also add all the weights to these intervals (as you commented), to be sure that within a given interval any edge's cost always increases or always decreases.
For 1 you could go for (w_i + w_j) / 2 for all pairs of weights (the spots where two different edges change their order), but this generates O(m^2) spots, and most of them don't change the MST. So imagine that after you calculated all (w_i + w_j) / 2 you have a list like intervals = [0, 6, 10, 16, ..., 120, 200]. You calculate mst[intervals[0]] and mst[intervals[end]]:
- If those 2 MSTs are the same, then all the points in between can be eliminated (we are sure the MST doesn't change in this range).
- If they are different, you calculate mid = (ini + end) / 2, compute mst[intervals[mid]], and recursively apply the same to [ini, mid] and [mid, end].
In the end you should have something like O(m) intervals, and you did this binary search m times, giving you a complexity of O(m log m) MST computations. Then you should add (2) all the edges. I hope this is a little clearer than my previous explanation. You can see my implementation in #145770276; I used the "cuts" variable to calculate all possible intervals and "edgeCuts" for the real ones. Sorry, my code is a little messy because I made a lot of changes over several days. I hope this helps :)
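A sketch of that recursive elimination (illustrative Python; mst_edges(x) is an assumed helper returning a canonical description of the MST at query value x, and the early exit when both endpoint MSTs match is the empirical shortcut described above):

def find_changes(pts, lo, hi, mst_lo, mst_hi, out):
    # pts: sorted candidate spots (w_i + w_j) / 2; out collects the real change points
    if mst_lo == mst_hi:
        return  # assume the MST is constant on [pts[lo], pts[hi]]
    if hi - lo == 1:
        out.append(pts[hi])  # a new MST starts at pts[hi]
        return
    mid = (lo + hi) // 2
    mst_mid = mst_edges(pts[mid])
    find_changes(pts, lo, mid, mst_lo, mst_mid, out)
    find_changes(pts, mid, hi, mst_mid, mst_hi, out)

Called as find_changes(pts, 0, len(pts) - 1, mst_edges(pts[0]), mst_edges(pts[-1]), out), it performs one mst_edges call per visited midpoint, i.e. roughly O(changes * log m) MST computations.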
First, I want to thank you for spending so much time answering me. In fact, this was much clearer than your previous explanation; I have understood what you said and learned a lot from your reply. Although I cannot use Java, I still tried to read a lot of your code, and I have to say that you are wise: your code's time complexity is better than the standard code's. But I am not clever enough to prove that there are only that many "real" MSTs, and I also can't give a counterexample. Still, the truth is that your code is better. Thanks!
Oh, one more thing: I just saw a discussion about this question at the top of this page. I'll read more about it.
OK, now I know why there are only that many "real" MSTs. Thanks a lot!
Problem E can't be solved by Prim's algorithm, or can it?
For problem D, how exactly can we get a safe upper bound for the maximum possible weights? Do we usually first code a brute force to find the upper bound and then write the solution? We can't code the DP solution for the second half if we don't know that the upper bound is small.
Problem F's tests are not strong enough for the time limit; the test cases should be more thorough. However, the problem was good.
For problem D it is also possible to precalculate a table with 1000 entries of the minimum operations needed to reach each number from 1 and just paste it into the program :D. Then, since it's precalculated on the contestant's computer, one can use a slow O(N^3) algorithm instead of the O(N^2) DP or the BFS approach.