prperf.c
上传用户:jlfgdled
上传日期:2013-04-10
资源大小:33168k
文件大小:11k
源码类别:

Linux/Unix编程

开发平台:

Unix_Linux

  1. /*****************************************************************************
  2.  *
  3.  * Module Name: prperf.c
  4.  *              $Revision: 21 $
  5.  *
  6.  *****************************************************************************/
  7. /*
  8.  *  Copyright (C) 2000, 2001 Andrew Grover
  9.  *
  10.  *  This program is free software; you can redistribute it and/or modify
  11.  *  it under the terms of the GNU General Public License as published by
  12.  *  the Free Software Foundation; either version 2 of the License, or
  13.  *  (at your option) any later version.
  14.  *
  15.  *  This program is distributed in the hope that it will be useful,
  16.  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
  17.  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  18.  *  GNU General Public License for more details.
  19.  *
  20.  *  You should have received a copy of the GNU General Public License
  21.  *  along with this program; if not, write to the Free Software
  22.  *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
  23.  */
  24. /*
  25.  * TBD: 1. Support ACPI 2.0 processor performance states (not just throttling).
  26.  *      2. Fully implement thermal -vs- power management limit control.
  27.  */
  28. #include <acpi.h>
  29. #include <bm.h>
  30. #include "pr.h"
  31. #define _COMPONENT ACPI_PROCESSOR
  32. MODULE_NAME ("prperf")
  33. /****************************************************************************
  34.  *                                  Globals
  35.  ****************************************************************************/
/* FADT (Fixed ACPI Description Table), provided by the ACPI core; this file
 * reads its duty_offset/duty_width fields to locate the P_CNT duty-cycle bits. */
extern fadt_descriptor_rev2 acpi_fadt;
/* Lookup table for 2^n, n in [0,9]; indexed by acpi_fadt.duty_width to compute
 * the throttling state count (2^duty_width). */
const u32 POWER_OF_2[] = {1,2,4,8,16,32,64,128,256,512};
  38. /****************************************************************************
  39.  *
  40.  * FUNCTION:    pr_perf_get_frequency
  41.  *
  42.  * PARAMETERS:
  43.  *
  44.  * RETURN:
  45.  *
  46.  * DESCRIPTION:
  47.  *
  48.  ****************************************************************************/
  49. acpi_status
  50. pr_perf_get_frequency (
  51. PR_CONTEXT *processor,
  52. u32 *frequency) {
  53. acpi_status status = AE_OK;
  54. FUNCTION_TRACE("pr_perf_get_frequency");
  55. if (!processor || !frequency) {
  56. return_ACPI_STATUS(AE_BAD_PARAMETER);
  57. }
  58. /* TBD: Generic method to calculate processor frequency. */
  59. return_ACPI_STATUS(status);
  60. }
  61. /****************************************************************************
  62.  *
  63.  * FUNCTION:    pr_perf_get_state
  64.  *
  65.  * PARAMETERS:
  66.  *
  67.  * RETURN:
  68.  *
  69.  * DESCRIPTION:
  70.  *
  71.  ****************************************************************************/
  72. /* TBD: Include support for _real_ performance states (not just throttling). */
  73. acpi_status
  74. pr_perf_get_state (
  75. PR_CONTEXT              *processor,
  76. u32                     *state)
  77. {
  78. u32                     pblk_value = 0;
  79. u32                     duty_mask = 0;
  80. u32                     duty_cycle = 0;
  81. FUNCTION_TRACE("pr_perf_get_state");
  82. if (!processor || !state) {
  83. return_ACPI_STATUS(AE_BAD_PARAMETER);
  84. }
  85. if (processor->performance.state_count == 1) {
  86. *state = 0;
  87. return_ACPI_STATUS(AE_OK);
  88. }
  89. acpi_os_read_port(processor->pblk.address, &pblk_value, 32);
  90. /*
  91.  * Throttling Enabled?
  92.  * -------------------
  93.  * If so, calculate the current throttling state, otherwise return
  94.  * '100% performance' (state 0).
  95.  */
  96. if (pblk_value & 0x00000010) {
  97. duty_mask = processor->performance.state_count - 1;
  98. duty_mask <<= acpi_fadt.duty_offset;
  99. duty_cycle = pblk_value & duty_mask;
  100. duty_cycle >>= acpi_fadt.duty_offset;
  101. if (duty_cycle == 0) {
  102. *state = 0;
  103. }
  104. else {
  105. *state = processor->performance.state_count -
  106. duty_cycle;
  107. }
  108. }
  109. else {
  110. *state = 0;
  111. }
  112. ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "Processor [%02x] is at performance state [%d%%].n", processor->device_handle, processor->performance.state[*state].performance));
  113. return_ACPI_STATUS(AE_OK);
  114. }
  115. /****************************************************************************
  116.  *
  117.  * FUNCTION:    pr_perf_set_state
  118.  *
  119.  * PARAMETERS:
  120.  *
  121.  * RETURN:      AE_OK
  122.  *              AE_BAD_PARAMETER
  123.  *              AE_BAD_DATA         Invalid target throttling state.
  124.  *
  125.  * DESCRIPTION:
  126.  *
  127.  ****************************************************************************/
  128. /* TBD: Includes support for _real_ performance states (not just throttling). */
  129. acpi_status
  130. pr_perf_set_state (
  131. PR_CONTEXT              *processor,
  132. u32                     state)
  133. {
  134. u32                     pblk_value = 0;
  135. u32                     duty_mask = 0;
  136. u32                     duty_cycle = 0;
  137. u32                     i = 0;
  138. FUNCTION_TRACE ("pr_perf_set_state");
  139. if (!processor) {
  140. return_ACPI_STATUS(AE_BAD_PARAMETER);
  141. }
  142. if (state > (processor->performance.state_count - 1)) {
  143. return_ACPI_STATUS(AE_BAD_DATA);
  144. }
  145. if ((state == processor->performance.active_state) ||
  146. (processor->performance.state_count == 1)) {
  147. return_ACPI_STATUS(AE_OK);
  148. }
  149. /*
  150.  * Calculate Duty Cycle/Mask:
  151.  * --------------------------
  152.  * Note that we don't support duty_cycle values that span bit 4.
  153.  */
  154. if (state) {
  155. duty_cycle = processor->performance.state_count - state;
  156. duty_cycle <<= acpi_fadt.duty_offset;
  157. }
  158. else {
  159. duty_cycle = 0;
  160. }
  161. duty_mask = ~((u32)(processor->performance.state_count - 1));
  162. for (i=0; i<acpi_fadt.duty_offset; i++) {
  163. duty_mask <<= acpi_fadt.duty_offset;
  164. duty_mask += 1;
  165. }
  166. /*
  167.  * Disable Throttling:
  168.  * -------------------
  169.  * Got to turn it off before you can change the duty_cycle value.
  170.  * Throttling is disabled by writing a 0 to bit 4.
  171.  */
  172. acpi_os_read_port(processor->pblk.address, &pblk_value, 32);
  173. if (pblk_value & 0x00000010) {
  174. pblk_value &= 0xFFFFFFEF;
  175. acpi_os_write_port(processor->pblk.address, pblk_value, 32);
  176. }
  177. /*
  178.  * Set Duty Cycle:
  179.  * ---------------
  180.  * Mask off the old duty_cycle value, mask in the new.
  181.  */
  182. pblk_value &= duty_mask;
  183. pblk_value |= duty_cycle;
  184. acpi_os_write_port(processor->pblk.address, pblk_value, 32);
  185. /*
  186.  * Enable Throttling:
  187.  * ------------------
  188.  * But only for non-zero (non-100% performance) states.
  189.  */
  190. if (state) {
  191. pblk_value |= 0x00000010;
  192. acpi_os_write_port(processor->pblk.address, pblk_value, 32);
  193. }
  194. processor->performance.active_state = state;
  195. ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "Processor [%02x] set to performance state [%d%%].n", processor->device_handle, processor->performance.state[state].performance));
  196. return_ACPI_STATUS(AE_OK);
  197. }
  198. /****************************************************************************
  199.  *
  200.  * FUNCTION:    pr_perf_set_limit
  201.  *
  202.  * PARAMETERS:
  203.  *
  204.  * RETURN:
  205.  *
  206.  * DESCRIPTION:
  207.  *
  208.  ****************************************************************************/
  209. acpi_status
  210. pr_perf_set_limit (
  211. PR_CONTEXT              *processor,
  212. u32                     limit)
  213. {
  214. acpi_status status = AE_OK;
  215. PR_PERFORMANCE *performance = NULL;
  216. FUNCTION_TRACE ("pr_perf_set_limit");
  217. if (!processor) {
  218. return_ACPI_STATUS(AE_BAD_PARAMETER);
  219. }
  220. performance = &(processor->performance);
  221. /*
  222.  * Set Limit:
  223.  * ----------
  224.  * TBD:  Properly manage thermal and power limits (only set
  225.  *  performance state iff...).
  226.  */
  227. switch (limit) {
  228. case PR_PERF_DEC:
  229. if (performance->active_state <
  230. (performance->state_count-1)) {
  231. status = pr_perf_set_state(processor,
  232. (performance->active_state+1));
  233. }
  234. break;
  235. case PR_PERF_INC:
  236. if (performance->active_state > 0) {
  237. status = pr_perf_set_state(processor,
  238. (performance->active_state-1));
  239. }
  240. break;
  241. case PR_PERF_MAX:
  242. if (performance->active_state != 0) {
  243. status = pr_perf_set_state(processor, 0);
  244. }
  245. break;
  246. default:
  247. return_ACPI_STATUS(AE_BAD_DATA);
  248. break;
  249. }
  250. if (ACPI_SUCCESS(status)) {
  251. performance->thermal_limit = performance->active_state;
  252. }
  253. ACPI_DEBUG_PRINT ((ACPI_DB_INFO, "Processor [%02x] thermal performance limit set to [%d%%].n", processor->device_handle, processor->performance.state[performance->active_state].performance));
  254. return_ACPI_STATUS(status);
  255. }
  256. /****************************************************************************
  257.  *                             External Functions
  258.  ****************************************************************************/
  259. /****************************************************************************
  260.  *
  261.  * FUNCTION:    pr_perf_add_device
  262.  *
  263.  * PARAMETERS:  processor Our processor-specific context.
  264.  *
  265.  * RETURN:      AE_OK
  266.  *              AE_BAD_PARAMETER
  267.  *
  268.  * DESCRIPTION: Calculates the number of throttling states and the state
  269.  *              performance/power values.
  270.  *
  271.  ****************************************************************************/
  272. /* TBD: Support duty_cycle values that span bit 4. */
  273. acpi_status
  274. pr_perf_add_device (
  275. PR_CONTEXT              *processor)
  276. {
  277. acpi_status             status = AE_OK;
  278. u32                     i = 0;
  279. u32                     performance_step = 0;
  280. u32                     percentage = 0;
  281. FUNCTION_TRACE("pr_perf_add_device");
  282. if (!processor) {
  283. return_ACPI_STATUS(AE_BAD_PARAMETER);
  284. }
  285. /*
  286.  * Valid PBLK?
  287.  * -----------
  288.  * For SMP it is common to have the first (boot) processor have a
  289.  * valid PBLK while all others do not -- which implies that
  290.  * throttling has system-wide effects (duty_cycle programmed into
  291.  * the chipset effects all processors).
  292.  */
  293. if ((processor->pblk.length < 6) || !processor->pblk.address) {
  294. processor->performance.state_count = 1;
  295. }
  296. /*
  297.  * Valid Duty Offset/Width?
  298.  * ------------------------
  299.  * We currently only support duty_cycle values that fall within
  300.  * bits 0-3, as things get complicated when this value spans bit 4
  301.  * (the throttling enable/disable bit).
  302.  */
  303. else if ((acpi_fadt.duty_offset + acpi_fadt.duty_width) > 4) {
  304. processor->performance.state_count = 1;
  305. }
  306. /*
  307.  * Compute State Count:
  308.  * --------------------
  309.  * The number of throttling states is computed as 2^duty_width,
  310.  * but limited by PR_MAX_THROTTLE_STATES.  Note that a duty_width
  311.  * of zero results is one throttling state (100%).
  312.  */
  313. else {
  314. processor->performance.state_count =
  315. POWER_OF_2[acpi_fadt.duty_width];
  316. }
  317. if (processor->performance.state_count > PR_MAX_THROTTLE_STATES) {
  318. processor->performance.state_count = PR_MAX_THROTTLE_STATES;
  319. }
  320. /*
  321.  * Compute State Values:
  322.  * ---------------------
  323.  * Note that clock throttling displays a linear power/performance
  324.  * relationship (at 50% performance the CPU will consume 50% power).
  325.  */
  326. performance_step = (1000 / processor->performance.state_count);
  327. for (i=0; i<processor->performance.state_count; i++) {
  328. percentage = (1000 - (performance_step * i))/10;
  329. processor->performance.state[i].performance = percentage;
  330. processor->performance.state[i].power = percentage;
  331. }
  332. /*
  333.  * Get Current State:
  334.  * ------------------
  335.  */
  336. status = pr_perf_get_state(processor, &(processor->performance.active_state));
  337. if (ACPI_FAILURE(status)) {
  338. return_ACPI_STATUS(status);
  339. }
  340. /*
  341.  * Set to Maximum Performance:
  342.  * ---------------------------
  343.  * We'll let subsequent policy (e.g. thermal/power) decide to lower
  344.  * performance if it so chooses, but for now crank up the speed.
  345.  */
  346. if (0 != processor->performance.active_state) {
  347. status = pr_perf_set_state(processor, 0);
  348. }
  349. return_ACPI_STATUS(status);
  350. }
  351. /****************************************************************************
  352.  *
  353.  * FUNCTION:    pr_perf_remove_device
  354.  *
  355.  * PARAMETERS:
  356.  *
  357.  * RETURN:
  358.  *
  359.  * DESCRIPTION:
  360.  *
  361.  ****************************************************************************/
  362. acpi_status
  363. pr_perf_remove_device (
  364. PR_CONTEXT              *processor)
  365. {
  366. acpi_status             status = AE_OK;
  367. FUNCTION_TRACE("pr_perf_remove_device");
  368. if (!processor) {
  369. return_ACPI_STATUS(AE_BAD_PARAMETER);
  370. }
  371. MEMSET(&(processor->performance), 0, sizeof(PR_PERFORMANCE));
  372. return_ACPI_STATUS(status);
  373. }