From d63469099b25ddac3b2622602f78b9290c8d82a8 Mon Sep 17 00:00:00 2001
From: Sebastian Serth
Date: Tue, 12 Mar 2019 13:01:25 +0100
Subject: [PATCH] Incorporate feedback from PR review

---
 app/assets/javascripts/base.js                |   3 +-
 .../javascripts/channels/la_exercises.js      |   4 +-
 app/models/exercise.rb                        | 174 +++++++++--
 3 files changed, 91 insertions(+), 90 deletions(-)

diff --git a/app/assets/javascripts/base.js b/app/assets/javascripts/base.js
index 9b8fba25..6e7dd7ca 100644
--- a/app/assets/javascripts/base.js
+++ b/app/assets/javascripts/base.js
@@ -24,7 +24,8 @@ $.fn.scrollTo = function(selector) {
     }, ANIMATION_DURATION);
 };
 
-$.fn.replaceWithPush = function(a) {
+// Same as $.replaceWith(), but returns the new element instead of the removed one
+$.fn.replaceWithAndReturnNewElement = function(a) {
     const $a = $(a);
     this.replaceWith($a);
     return $a;
diff --git a/app/assets/javascripts/channels/la_exercises.js b/app/assets/javascripts/channels/la_exercises.js
index 76a2d988..8e758e49 100644
--- a/app/assets/javascripts/channels/la_exercises.js
+++ b/app/assets/javascripts/channels/la_exercises.js
@@ -31,7 +31,7 @@ $(document).on('turbolinks:load', function() {
         if ($row.length === 0) {
             $row = $($('#posted_rfcs')[0].insertRow(0));
         }
-        $row = $row.replaceWithPush(data.html);
+        $row = $row.replaceWithAndReturnNewElement(data.html);
         $row.find('time').timeago();
         $row.click(function () {
             Turbolinks.visit($(this).data("href"));
@@ -236,7 +236,7 @@ $(document).on('turbolinks:load', function() {
         $('#no_chart_data').addClass("d-none");
 
         transitionStacked();
-        // transitionGrouped();
+        // ToDo: Add a button that switches to the grouped view via transitionGrouped()
 
         buildDictionary(additional_user_data);
     }
diff --git a/app/models/exercise.rb b/app/models/exercise.rb
index 99c08757..75ae109e 100644
--- a/app/models/exercise.rb
+++ b/app/models/exercise.rb
@@ -97,93 +97,93 @@ class Exercise < ApplicationRecord
                submissions.user_type,
                score,
                created_at,
-               (created_at - lag(created_at) over (PARTITION BY submissions.user_id, exercise_id
+               (created_at - lag(created_at) over (PARTITION BY submissions.user_type, submissions.user_id, exercise_id
                    ORDER BY created_at)) AS working_time
-        FROM submissions
-        WHERE exercise_id = #{exercise_id} AND study_group_id = #{study_group_id} #{additional_filter}),
-      working_time_with_deltas_ignored AS (
-        SELECT user_id,
-               user_type,
-               score,
-               sum(CASE WHEN score IS NOT NULL THEN 1 ELSE 0 END)
-                   over (ORDER BY user_type, user_id, created_at) AS change_in_score,
-               created_at,
-               CASE WHEN working_time >= #{StatisticsHelper::WORKING_TIME_DELTA_IN_SQL_INTERVAL} THEN '0' ELSE working_time END AS working_time_filtered
-        FROM working_time_between_submissions
-      ),
-      working_times_with_score_expanded AS (
-        SELECT user_id,
-               user_type,
-               created_at,
-               working_time_filtered,
-               first_value(score)
-                   over (PARTITION BY user_type, user_id, change_in_score ORDER BY created_at ASC) AS corrected_score
-        FROM working_time_with_deltas_ignored
-      ),
-      working_times_with_duplicated_last_row_per_score AS (
-        SELECT *
-        FROM working_times_with_score_expanded
-        UNION ALL
-        -- Duplicate last row per score and make it unique by setting another created_at timestamp.
-        -- In addition, the working time is set to zero in order to prevent getting a wrong time.
-        -- This duplication is needed, as we will shift the scores and working times by one and need to ensure not to loose any information.
-        SELECT DISTINCT ON (user_type, user_id, corrected_score) user_id,
-               user_type,
-               created_at + INTERVAL '1us',
-               '00:00:00' as working_time_filtered,
-               corrected_score
-        FROM working_times_with_score_expanded
-      ),
-      working_times_with_score_not_null_and_shifted AS (
-        SELECT user_id,
-               user_type,
-               coalesce(lag(corrected_score) over (PARTITION BY user_type, user_id ORDER BY created_at ASC),
-                        0) AS shifted_score,
-               created_at,
-               working_time_filtered
-        FROM working_times_with_duplicated_last_row_per_score
-      ),
-      working_times_to_be_sorted AS (
-        SELECT user_id,
-               user_type,
-               shifted_score AS score,
-               MIN(created_at) AS start_time,
-               SUM(working_time_filtered) AS working_time,
-               SUM(SUM(working_time_filtered)) over (PARTITION BY user_type, user_id) AS total_working_time
-        FROM working_times_with_score_not_null_and_shifted
-        GROUP BY user_id, user_type, score
-      ),
-      working_times_with_index AS (
-        SELECT (dense_rank() over (ORDER BY total_working_time, user_type, user_id ASC) - 1) AS index,
-               user_id,
-               user_type,
-               score,
-               start_time,
-               working_time,
-               total_working_time
-        FROM working_times_to_be_sorted)
-      SELECT index,
-             user_id,
-             user_type,
-             name,
-             score,
-             start_time,
-             working_time,
-             total_working_time
-      FROM working_times_with_index
-             JOIN external_users ON user_type = 'ExternalUser' AND user_id = external_users.id
-      UNION ALL
-      SELECT index,
-             user_id,
-             user_type,
-             name,
-             score,
-             start_time,
-             working_time,
-             total_working_time
-      FROM working_times_with_index
-             JOIN internal_users ON user_type = 'InternalUser' AND user_id = internal_users.id
-      ORDER BY index, score ASC LIMIT 200;
+        FROM submissions
+        WHERE exercise_id = #{exercise_id} AND study_group_id = #{study_group_id} #{additional_filter}),
+      working_time_with_deltas_ignored AS (
+        SELECT user_id,
+               user_type,
+               score,
+               sum(CASE WHEN score IS NOT NULL THEN 1 ELSE 0 END)
+                   over (ORDER BY user_type, user_id, created_at ASC) AS change_in_score,
+               created_at,
+               CASE WHEN working_time >= #{StatisticsHelper::WORKING_TIME_DELTA_IN_SQL_INTERVAL} THEN '0' ELSE working_time END AS working_time_filtered
+        FROM working_time_between_submissions
+      ),
+      working_times_with_score_expanded AS (
+        SELECT user_id,
+               user_type,
+               created_at,
+               working_time_filtered,
+               first_value(score)
+                   over (PARTITION BY user_type, user_id, change_in_score ORDER BY created_at ASC) AS corrected_score
+        FROM working_time_with_deltas_ignored
+      ),
+      working_times_with_duplicated_last_row_per_score AS (
+        SELECT *
+        FROM working_times_with_score_expanded
+        UNION ALL
+        -- Duplicate last row per user and score and make it unique by setting another created_at timestamp.
+        -- In addition, the working time is set to zero in order to prevent getting a wrong time.
+        -- This duplication is needed, as we will shift the scores and working times by one and need to ensure not to lose any information.
+        SELECT DISTINCT ON (user_type, user_id, corrected_score) user_id,
+               user_type,
+               created_at + INTERVAL '1us',
+               '00:00:00' as working_time_filtered,
+               corrected_score
+        FROM working_times_with_score_expanded
+      ),
+      working_times_with_score_not_null_and_shifted AS (
+        SELECT user_id,
+               user_type,
+               coalesce(lag(corrected_score) over (PARTITION BY user_type, user_id ORDER BY created_at ASC),
+                        0) AS shifted_score,
+               created_at,
+               working_time_filtered
+        FROM working_times_with_duplicated_last_row_per_score
+      ),
+      working_times_to_be_sorted AS (
+        SELECT user_id,
+               user_type,
+               shifted_score AS score,
+               MIN(created_at) AS start_time,
+               SUM(working_time_filtered) AS working_time_per_score,
+               SUM(SUM(working_time_filtered)) over (PARTITION BY user_type, user_id) AS total_working_time
+        FROM working_times_with_score_not_null_and_shifted
+        GROUP BY user_id, user_type, score
+      ),
+      working_times_with_index AS (
+        SELECT (dense_rank() over (ORDER BY total_working_time, user_type, user_id ASC) - 1) AS index,
+               user_id,
+               user_type,
+               score,
+               start_time,
+               working_time_per_score,
+               total_working_time
+        FROM working_times_to_be_sorted)
+      SELECT index,
+             user_id,
+             user_type,
+             name,
+             score,
+             start_time,
+             working_time_per_score,
+             total_working_time
+      FROM working_times_with_index
+             JOIN external_users ON user_type = 'ExternalUser' AND user_id = external_users.id
+      UNION ALL
+      SELECT index,
+             user_id,
+             user_type,
+             name,
+             score,
+             start_time,
+             working_time_per_score,
+             total_working_time
+      FROM working_times_with_index
+             JOIN internal_users ON user_type = 'InternalUser' AND user_id = internal_users.id
+      ORDER BY index, score ASC;
     """
   end
 
@@ -210,7 +210,7 @@ class Exercise < ApplicationRecord
       additional_user_data[bucket] ||= []
       additional_user_data[max_bucket + 1] ||= []
 
-      user_progress[bucket][tuple['index']] = tuple["working_time"]
+      user_progress[bucket][tuple['index']] = tuple["working_time_per_score"]
       additional_user_data[bucket][tuple['index']] = {start_time: tuple["start_time"], score: tuple["score"]}
      additional_user_data[max_bucket + 1][tuple['index']] = {id: tuple['user_id'], type: tuple['user_type'], name: tuple['name']}
     end
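
Reviewer note, not part of the patch itself: the first exercise.rb change adds
submissions.user_type to the PARTITION BY of the lag() window in
working_time_between_submissions. A minimal PostgreSQL sketch of why that
matters, on made-up data, since an ExternalUser and an InternalUser can share
the same numeric user_id:

  -- Toy data, not the real submissions table.
  WITH submissions(user_type, user_id, created_at) AS (
    VALUES ('ExternalUser', 1, TIMESTAMP '2019-03-12 10:00:00'),
           ('InternalUser', 1, TIMESTAMP '2019-03-12 10:03:00'),
           ('ExternalUser', 1, TIMESTAMP '2019-03-12 10:10:00')
  )
  SELECT user_type, user_id, created_at,
         -- With user_type in the partition, the ExternalUser's second
         -- submission gets the correct 10-minute delta (10:00 -> 10:10).
         created_at - lag(created_at) OVER (PARTITION BY user_type, user_id
             ORDER BY created_at) AS working_time
  FROM submissions;

Partitioning by user_id alone would fold all three rows into one partition and
credit the ExternalUser with a 7-minute delta measured against another
person's submission.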
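
A second reviewer note, also not part of the patch: the comment block inside
working_times_with_duplicated_last_row_per_score is easier to follow on toy
data. The sketch below (made-up table and values) isolates the
duplicate-then-shift idea: each row's working time is attributed to the
previous score, i.e. the score the user was still working from, and the
duplicated row with a 1µs offset and zero working time keeps the final score
from disappearing after the shift.

  WITH events(user_id, score, created_at, working_time) AS (
    VALUES (1, 0, TIMESTAMP '2019-03-12 10:00:00', INTERVAL '0 minutes'),
           (1, 0, TIMESTAMP '2019-03-12 10:05:00', INTERVAL '5 minutes'),
           (1, 1, TIMESTAMP '2019-03-12 10:12:00', INTERVAL '7 minutes')
  ),
  with_duplicated_last_row AS (
    SELECT user_id, score, created_at, working_time FROM events
    UNION ALL
    -- Repeat the last row of each (user, score) group, nudged by 1µs and
    -- with zero working time, so the shift below cannot drop it.
    (SELECT DISTINCT ON (user_id, score)
            user_id, score, created_at + INTERVAL '1us', INTERVAL '0 minutes'
     FROM events
     ORDER BY user_id, score, created_at DESC)
  ),
  shifted AS (
    -- Attribute each row's working time to the previous score.
    SELECT user_id,
           coalesce(lag(score) OVER (PARTITION BY user_id
               ORDER BY created_at), 0) AS score,
           working_time
    FROM with_duplicated_last_row
  )
  SELECT user_id, score, SUM(working_time) AS working_time_per_score
  FROM shifted
  GROUP BY user_id, score
  ORDER BY user_id, score;

With these values the query returns 12 minutes attributed to score 0 and
0 minutes to score 1: all of the time was spent getting from score 0 to
score 1, and the duplicated row is what makes score 1 appear at all.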