[MDEV-23175] ftime deprecated - failed to build with glibc-2.31 Created: 2020-07-15  Updated: 2021-12-22  Resolved: 2020-07-28

Status: Closed
Project: MariaDB Server
Component/s: Performance Schema
Affects Version/s: 10.1.45, 10.5.3
Fix Version/s: 10.5.5

Type: Bug Priority: Major
Reporter: Daniel Black Assignee: Daniel Black
Resolution: Fixed Votes: 0
Labels: None
Environment:

Fedora 32, gcc 10.1.0, glibc 2.31



 Description   

/home/dan/repos/mariadb-server-10.5/mysys/my_rdtsc.c: In function ‘my_timer_milliseconds’:
/home/dan/repos/mariadb-server-10.5/mysys/my_rdtsc.c:181:3: error: ‘ftime’ is deprecated [-Werror=deprecated-declarations]
  181 |   ftime(&ft);
      |   ^~~~~
In file included from /home/dan/repos/mariadb-server-10.5/mysys/my_rdtsc.c:79:
/usr/include/sys/timeb.h:39:12: note: declared here
   39 | extern int ftime (struct timeb *__timebuf)
      |            ^~~~~
cc1: all warnings being treated as errors



 Comments   
Comment by Daniel Black [ 2020-07-28 ]

Commits leave ftime there; however, get_clock will get hit for all forceable OSs at the moment.

ftime could be removed in 10.6; kevg has some ideas using C++ time constructs.

If a form of backport should be attempted, please let me know.

Comment by Eugene Kosov (Inactive) [ 2020-07-28 ]

The idea is pretty simple and looks roughly like this:

diff --git a/mysys/CMakeLists.txt b/mysys/CMakeLists.txt
index f4a6a6d55b3..f4ed7ee7df4 100644
--- a/mysys/CMakeLists.txt
+++ b/mysys/CMakeLists.txt
@@ -43,7 +43,7 @@ SET(MYSYS_SOURCES  array.c charset-def.c charset.c checksum.c my_default.c
                                 safemalloc.c my_new.cc
 				my_getncpus.c my_safehash.c my_chmod.c my_rnd.c
                                 my_uuid.c wqueue.c waiting_threads.c ma_dyncol.c ../sql-common/my_time.c
-				my_rdtsc.c my_context.c psi_noop.c
+				my_rdtsc.cc my_context.c psi_noop.c
                                 my_atomic_writes.c my_cpu.c my_likely.c
                                 file_logger.c my_dlerror.c)
 
diff --git a/mysys/my_rdtsc.cc b/mysys/my_rdtsc.cc
index 2d93239f7dd..0f37183f17c 100644
--- a/mysys/my_rdtsc.cc
+++ b/mysys/my_rdtsc.cc
@@ -55,6 +55,8 @@
 #include "my_global.h"
 #include "my_rdtsc.h"
 
+#include <chrono>
+
 #if defined(_WIN32)
 #include <stdio.h>
 #include "windows.h"
@@ -95,33 +97,9 @@
 
 ulonglong my_timer_nanoseconds(void)
 {
-#if defined(HAVE_READ_REAL_TIME)
-  {
-    timebasestruct_t tr;
-    read_real_time(&tr, TIMEBASE_SZ);
-    return (ulonglong) tr.tb_high * 1000000000 + (ulonglong) tr.tb_low;
-  }
-#elif defined(HAVE_SYS_TIMES_H) && defined(HAVE_GETHRTIME)
-  /* SunOS 5.10+, Solaris, HP-UX: hrtime_t gethrtime(void) */
-  return (ulonglong) gethrtime();
-#elif defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_REALTIME)
-  {
-    struct timespec tp;
-    clock_gettime(CLOCK_REALTIME, &tp);
-    return (ulonglong) tp.tv_sec * 1000000000 + (ulonglong) tp.tv_nsec;
-  }
-#elif defined(__APPLE__) && defined(__MACH__)
-  {
-    ulonglong tm;
-    static mach_timebase_info_data_t timebase_info= {0,0};
-    if (timebase_info.denom == 0)
-      (void) mach_timebase_info(&timebase_info);
-    tm= mach_absolute_time();
-    return (tm * timebase_info.numer) / timebase_info.denom;
-  }
-#else
-  return 0;
-#endif
+  return std::chrono::duration_cast<std::chrono::nanoseconds>(
+             std::chrono::steady_clock::now().time_since_epoch())
+      .count();
 }
 
 /*
@@ -135,35 +113,9 @@ ulonglong my_timer_nanoseconds(void)
 
 ulonglong my_timer_microseconds(void)
 {
-#if defined(HAVE_GETTIMEOFDAY)
-  {
-    static ulonglong last_value= 0;
-    struct timeval tv;
-    if (gettimeofday(&tv, NULL) == 0)
-      last_value= (ulonglong) tv.tv_sec * 1000000 + (ulonglong) tv.tv_usec;
-    else
-    {
-      /*
-        There are reports that gettimeofday(2) can have intermittent failures
-        on some platform, see for example Bug#36819.
-        We are not trying again or looping, just returning the best value possible
-        under the circumstances ...
-      */
-      last_value++;
-    }
-    return last_value;
-  }
-#elif defined(_WIN32)
-  {
-    /* QueryPerformanceCounter usually works with about 1/3 microsecond. */
-    LARGE_INTEGER t_cnt;
-
-    QueryPerformanceCounter(&t_cnt);
-    return (ulonglong) t_cnt.QuadPart;
-  }
-#else
-  return 0;
-#endif
+  return std::chrono::duration_cast<std::chrono::microseconds>(
+             std::chrono::steady_clock::now().time_since_epoch())
+      .count();
 }
 
 /*
@@ -175,41 +127,9 @@ ulonglong my_timer_microseconds(void)
 
 ulonglong my_timer_milliseconds(void)
 {
-#if defined(HAVE_SYS_TIMEB_H) && defined(HAVE_FTIME)
-  /* ftime() is obsolete but maybe the platform is old */
-  struct timeb ft;
-  ftime(&ft);
-  return (ulonglong)ft.time * 1000 + (ulonglong)ft.millitm;
-#elif defined(HAVE_TIME)
-  return (ulonglong) time(NULL) * 1000;
-#elif defined(_WIN32)
-   FILETIME ft;
-   GetSystemTimeAsFileTime( &ft );
-   return ((ulonglong)ft.dwLowDateTime +
-                  (((ulonglong)ft.dwHighDateTime) << 32))/10000;
-#else
-  return 0;
-#endif
-}
-
-/*
-  For ticks, which we handle with times(), the frequency
-  is usually 100/second and the overhead is surprisingly
-  bad, sometimes even worse than gettimeofday's overhead.
-*/
-
-ulonglong my_timer_ticks(void)
-{
-#if defined(HAVE_SYS_TIMES_H) && defined(HAVE_TIMES)
-  {
-    struct tms times_buf;
-    return (ulonglong) times(&times_buf);
-  }
-#elif defined(_WIN32)
-  return (ulonglong) GetTickCount();
-#else
-  return 0;
-#endif
+  return std::chrono::duration_cast<std::chrono::milliseconds>(
+             std::chrono::steady_clock::now().time_since_epoch())
+      .count();
 }
 
 /*

I think it's safe to do it in 10.5. Maybe it can work even in 10.4, but let's be cautious.

Generated at Thu Feb 08 09:20:25 UTC 2024 using Jira 8.20.16#820016-sha1:9d11dbea5f4be3d4cc21f03a88dd11d8c8687422.