aggregated data

CREATE TABLE product_version
(
  product_id     VARCHAR2(10) NOT NULL,
  product_type_p NUMBER(4)    NOT NULL,
  start_date     DATE         NOT NULL,
  end_date       DATE,
  rate           NUMBER       NOT NULL,
  type_p         VARCHAR2(10)
);

SELECT * FROM product_version;

INSERT INTO product_version (product_id, product_type_p, start_date, end_date, rate, type_p)
VALUES ('0080166887', 644, TO_DATE('02-01-2012','dd-mm-yyyy'), TO_DATE('31-01-2013','dd-mm-yyyy'), 45, 'male');

INSERT INTO product_version (product_id, product_type_p, start_date, end_date, rate, type_p)
VALUES ('0080166887', 644, TO_DATE('02-01-2013','dd-mm-yyyy'), TO_DATE('31-01-2014','dd-mm-yyyy'), 35, 'male');

INSERT INTO product_version (product_id, product_type_p, start_date, end_date, rate, type_p)
VALUES ('0080166887', 644, TO_DATE('02-01-2014','dd-mm-yyyy'), TO_DATE('31-01-2015','dd-mm-yyyy'), 35, 'male');

INSERT INTO product_version (product_id, product_type_p, start_date, end_date, rate, type_p)
VALUES ('0080166887', 1124, TO_DATE('02-01-2012','dd-mm-yyyy'), TO_DATE('31-01-2013','dd-mm-yyyy'), 45, 'male');

INSERT INTO product_version (product_id, product_type_p, start_date, end_date, rate, type_p)
VALUES ('0080166887', 1124, TO_DATE('02-01-2013','dd-mm-yyyy'), TO_DATE('31-01-2014','dd-mm-yyyy'), 35, 'male');

INSERT INTO product_version (product_id, product_type_p, start_date, end_date, rate, type_p)
VALUES ('0080166887', 1124, TO_DATE('02-01-2014','dd-mm-yyyy'), TO_DATE('31-01-2015','dd-mm-yyyy'), 35, 'male');

INSERT INTO product_version (product_id, product_type_p, start_date, end_date, rate, type_p)
VALUES ('0080166887', 1129, TO_DATE('02-01-2012','dd-mm-yyyy'), TO_DATE('31-01-2013','dd-mm-yyyy'), 25, 'male');

INSERT INTO product_version (product_id, product_type_p, start_date, end_date, rate, type_p)
VALUES ('0080166887', 1129, TO_DATE('02-01-2013','dd-mm-yyyy'), TO_DATE('31-01-2014','dd-mm-yyyy'), 25, 'male');

INSERT INTO product_version (product_id, product_type_p, start_date, end_date, rate, type_p)
VALUES ('0080166887', 1129, TO_DATE('02-01-2014','dd-mm-yyyy'), TO_DATE('31-01-2015','dd-mm-yyyy'), 25, 'male');

INSERT INTO product_version (product_id, product_type_p, start_date, end_date, rate, type_p)
VALUES ('0080166887', 1124, TO_DATE('02-01-2012','dd-mm-yyyy'), TO_DATE('31-01-2013','dd-mm-yyyy'), 45, 'female');

INSERT INTO product_version (product_id, product_type_p, start_date, end_date, rate, type_p)
VALUES ('0080166887', 1124, TO_DATE('02-01-2013','dd-mm-yyyy'), TO_DATE('31-01-2014','dd-mm-yyyy'), 35, 'female');

INSERT INTO product_version (product_id, product_type_p, start_date, end_date, rate, type_p)
VALUES ('0080166887', 1124, TO_DATE('02-01-2014','dd-mm-yyyy'), TO_DATE('31-01-2015','dd-mm-yyyy'), 35, 'female');

SELECT * FROM product_version
WHERE product_id = '0080166887'
ORDER BY type_p DESC, start_date ASC, product_type_p, rate;

For each product_type_p with the same type_p and rate, I need to check whether there are several years in a row, meaning that the end_date of one record and the start_date of the next record are contiguous; such runs should be collapsed into a single row.

For the example given, the results should look like this:

PRODUCT_TYPE_P  START_DATE  END_DATE    RATE  TYPE_P
644             02/01/2012  31/01/2013  45    lead
644             02/01/2013  31/01/2015  35    lead
1124            02/01/2012  31/01/2013  45    lead
1124            02/01/2013  31/01/2015  35    lead
1124            02/01/2013  31/01/2015  35    trail
1124            02/01/2012  31/01/2013  45    trail
1129            02/01/2012  31/01/2015  25    lead

Can this be done with analytic functions? I would appreciate your help.

Thanks in advance

Naama

Is this what you need?

SELECT product_type_p,
       min_date start_date,
       max_date end_date,
       rate,
       type_p
FROM
(
    SELECT product_type_p,
           start_date,
           end_date,
           rate,
           CASE WHEN type_p = 'male' THEN 'lead' ELSE 'trail' END type_p,
           MIN(start_date) OVER (PARTITION BY product_id, product_type_p, rate, type_p, case_val) min_date,
           MAX(end_date)   OVER (PARTITION BY product_id, product_type_p, rate, type_p, case_val) max_date
    FROM
    (
        SELECT product_id,
               product_type_p,
               start_date,
               end_date,
               rate,
               type_p,
               CASE WHEN end_date + 1 = LEAD(start_date, 1, end_date + 1)
                                        OVER (PARTITION BY product_id, product_type_p, rate, type_p
                                              ORDER BY start_date)
                    THEN 1
                    ELSE 2
               END case_val
        FROM product_version
    )
)
WHERE start_date = min_date;

OUTPUT:
=======

PRODUCT_TYPE_P START_DATE END_DATE  RATE TYPE_P
-------------- ---------- --------- ---- ------
           644 01-MAR-14  31-JAN-15   35 lead
           644 01-FEB-13  31-JAN-14   35 lead
           644 01-FEB-12  31-JAN-13   45 lead
          1124 01-FEB-13  31-JAN-15   35 trail
          1124 01-FEB-13  31-JAN-15   35 lead
          1124 01-FEB-12  31-JAN-13   45 trail
          1124 01-FEB-12  31-JAN-13   45 lead
          1129 01-FEB-12  31-JAN-15   25 lead

8 rows selected.
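For reference, the same "gaps and islands" grouping is often written with a LAG comparison plus a running sum that numbers each island. A minimal sketch against the product_version table above (gap_flag and grp are illustrative names, not from the original posts):

SELECT product_type_p,
       MIN(start_date) start_date,
       MAX(end_date)   end_date,
       rate,
       type_p
FROM
(
    -- running total of gap flags: every row that starts a new island bumps grp
    SELECT t.*,
           SUM(gap_flag) OVER (PARTITION BY product_id, product_type_p, rate, type_p
                               ORDER BY start_date) grp
    FROM
    (
        -- flag rows that do NOT continue the previous row's date range
        SELECT pv.*,
               CASE WHEN start_date = LAG(end_date) OVER (PARTITION BY product_id, product_type_p, rate, type_p
                                                          ORDER BY start_date) + 1
                    THEN 0
                    ELSE 1
               END gap_flag
        FROM product_version pv
    ) t
)
GROUP BY product_id, product_type_p, rate, type_p, grp;

Rows whose start_date falls exactly one day after the previous end_date keep the same grp value, so each contiguous run collapses into a single MIN/MAX row.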

Thank you

Ann

Tags: Database

Similar Questions

  • Problem displaying detail along with aggregated data.

    Hello

    I'm new to BIP (BI Publisher) and I am facing a problem displaying both the detail and the aggregated values.

    My details are as below:

    SECURITY  VALUE
    S1        10
    S2        20
    S3        30
    S3        40
    S4        50
    S5        60
    S5        70

    I want to display the data in the report like this:

    SECURITY  VALUE
    S1        10
    S2        20
    S3        30
    S3        40
    S3 Total  70
    S4        50
    S5        10
    S5        70
    S5 Total  80


    I tried to use <?for-each-group:G_2;./Security?>, but after running it I get:

    SECURITY  VALUE
    S1        10
    S2        20
    S3        30
    S3 Total  70
    S4        50
    S5        10
    S5 Total  80

    Template:
    <?for-each-group:G_2;./Security?> <?Security?>: <?Value?>
    <?if:count(current-group(./SECNAME))>1?> Total <?Security?>: <?sum(current-group(./Value))?> <?end if?> <?end for-each-group?>

    The problem is that I need to view the details as well as aggregated data. Please suggest.

    Instead of G1, use G_1.


  • Aggregation of RF

    Hello

    Can an AP1100 function as a WDS capable of RF aggregation?

    Thank you.

    Hi Navid,

    The 1100 Series AP can be set up for WDS. Here is how the WDS AP is involved in radio management (RM):

    The WDS provides the control-path technologies for the WLAN and must be active on one access point in each AP subnet; a backup WDS can also be defined in each AP subnet. The WDS provides:

    Fast, secure layer 2 wireless client roaming - the WDS plays the role of the 802.1x authenticator for the network's layer 2 wireless clients.

    Radio management (RM) data aggregation - the WLSE provides intelligent processing of the aggregated radio data that the WDS access point collects from the other devices of the wireless network. The WLSE can manage multiple subnets, so it can receive radio data from many WDS APs.

    There is no RM data aggregation without a WDS.

    See this doc:

    http://www.Cisco.com/en/us/products/SW/cscowork/ps3915/products_user_guide_chapter09186a0080527f1f.html#wp1617750

    What is the role of the WDS device in the wireless LAN (WLAN)?

    The WDS device performs these tasks on your WLAN:

    Advertises its WDS capability and participates in electing the best WDS device for your WLAN. When you configure your WLAN for WDS, you set up one device as the main WDS candidate and one or more additional devices as backup WDS candidates. If the main WDS device is taken offline, one of the backup WDS devices takes the place of the main unit.

    Authenticates all the APs in the subnet and establishes a secure communication channel with each of them.

    Collects radio data from the APs in the subnet, aggregates the data, and forwards it to the Wireless LAN Solution Engine (WLSE) device on your network.

    Registers all client devices in the subnet, establishes session keys for client devices, and caches the clients' security credentials. When a client roams to another access point, the WDS device forwards the client's security credentials to the new access point.

    See this doc:

    Wireless Domain Services FAQ

    http://www.Cisco.com/en/us/Tech/tk722/tk809/technologies_q_and_a_item09186a00804d4421.shtml#QA6

    1100 Series AP - Configuring WDS, Fast Secure Roaming, and Radio Management

    http://www.Cisco.com/en/us/docs/wireless/access_point/12.3_2_JA/configuration/guide/s32roamg.html

    I hope this helps!

    Rob

  • ODI - how to export "elimination" data from HFM?

    Hello

    I'm trying to export some data from HFM into a SQL table, using ODI. It seems that ODI exports the "entity currency" data by default. Is it possible to export the "elimination" value instead?

    If so, how can I do it?

    It is not possible. Only input-level data can be exported with ODI. Elimination is regarded as aggregated data and for this reason cannot be exported; only level 0 data can be exported.

  • Aggregation of an ASO member with an IIF formula

    Hello

    I have an ASO cube with six dimensions. Based on the requirement, I need a calculated member (MBR1_CL) that takes its data from two input members (MBR2_IN & MBR1_IN) at specific intersections.

    I have written an ASO member formula on the accounts dimension member MBR1_CL using IIF, as below:

    IIF (
      IsAncestor([West],[Entity].CurrentMember) AND
      IsAncestor([ProdLine1],[Product].CurrentMember) AND
      Is([Years].CurrentMember, &CYear) AND
      (Is([Period].CurrentMember,[Jan]) OR Is([Period].CurrentMember,[Feb]) OR Is([Period].CurrentMember,[Mar])) AND
      IsAncestor([CustCat1],[Customer].CurrentMember),
      [Accounts].[MBR2_IN].Value,
      [Accounts].[MBR1_IN].Value
    )

    The formula validates and works at level 0, but my requirement involves the aggregation of months into quarters. Members MBR1_IN & MBR2_IN are loaded for all months, i.e. Jan through Dec, and when they are retrieved at the Qtr level they show correctly aggregated data. However, member MBR1_CL, which takes its data from MBR2_IN for the months Jan, Feb, and Mar, does not show the correct aggregation at Qtr1; instead it shows the value of member MBR1_IN at Qtr1. I have set the solve order to '2' for MBR1_CL, '3' for Qtr1, and all other members are at the default '0'. Could you please help and suggest a solution to get correctly aggregated data at all levels for member MBR1_CL?

    Thank you

    AK

    Hi, AK,

    That is also what you are telling Essbase to do...

    (Is([Period].CurrentMember,[Jan]) OR Is([Period].CurrentMember,[Feb]) OR Is([Period].CurrentMember,[Mar]))

    You don't say what it should do for Qtr1, so there it just takes [Accounts].[MBR1_IN].Value.

    Why should it aggregate the months?

    You will need to add all of the aggregation logic to the MDX. Yes, it will be a long script.

    It is perhaps easier to load the data into the appropriate location.

    Kind regards

    Philip Hulsebosch

  • Disk groups not visible in cluster. vSAN datastore exists. 2 hosts (of 8) in the cluster do not see the vSAN datastore; their storage is not recognized.

    http://i.imgur.com/pqAXtFl.PNG

    http://i.imgur.com/BnztaDD.PNG

    Not sure how to even tear it down and rebuild it when the disk groups are not visible. The disks show as healthy on each host's storage adapters.

    Currently on the latest version of vCenter 5.5. Hosts running 5.5 build 2068190.

    Just built. Happy to demolish and rebuild. I just don't know why the datastore is not visible on those two hosts, and why the disk groups are only recognized on 3 hosts when more are contributing. Also strange that I can't get the disk groups to populate in vCenter. I tried two different browsers (Chrome and IE).

    I have it working now.

    All hosts are identical ESXi 5.5 builds. All hosts are homogeneous in CPU / disk controller / installed RAM / storage.

    To get it working, I had to manually destroy all traces of vSAN on each individual host node:

    (1) Put the hosts in maintenance mode and remove them from the cluster. I was unable to disable vSAN on the cluster, so I did it on each host node (manually, via the CLI below), then disconnected from the vCenter web client and logged back in, which finally refreshed the ability to disable it on the cluster.

    esxcli vsan cluster get - check the vSAN status of each host.

    esxcli vsan cluster leave - drop the host from the vSAN cluster.

    esxcli vsan storage list - view the disks in the individual host's disk group.

    esxcli vsan storage remove -d naa.id_of_magnetic_disks_here - remove each magnetic disk from the disk group (you can skip this by using the following command to remove the SSD, which drops every disk in that host's disk group).

    esxcli vsan storage remove -s naa.id_of_solid_state_disks_here - removes the SSD and all the magnetic disks in a given disk group.

    After that, I was able to manually add the hosts back to the cluster, take them out of maintenance mode, and configure the disk groups. The aggregated data of the vSAN datastore is correct now, and everything is functional.

    Another question for those of you who are still reading... How can I configure things so that a VM that migrates to (or is created on) the vSAN datastore will immediately pick up the default storage policy I built for vSAN?

    Thanks to anyone who has followed along.

  • How to load a large amount of data into 2 Oracle tables, ~6 million rows - URGENT

    Hi all,

    I have a file with 6-8 million rows that we load into a temporary table using a direct path load. Then we fill in the missing data via lookups. The data in this temporary table is then inserted/updated into table1.

    for ex - we have:
    Table1 (key1, key2, key3, key4, key5, col1, col2, col3)
    Table2 (key1, key2, key3, col1, col2, col3) - the only diff between table1 and table2 is key4 and key5 (present in table1).

    First we insert/update data from the temporary table into table1.

    Insert:
    INSERT INTO table1
    SELECT * FROM insert_table WHERE (key1, key2, key3, key4, key5) IN
      (SELECT key1, key2, key3, key4, key5 FROM temporary_table
       MINUS
       SELECT key1, key2, key3, key4, key5 FROM table1);



    FOR i IN (SELECT <all columns>
              FROM insert_table
              MINUS
              SELECT <all columns> FROM table1)
    LOOP
      UPDATE table1 ...;
    END LOOP;


    Then we insert/update data into table2 from table1.
    - The only difference in the insert/update is that I use PARTITION BY to sum the non-key columns and insert the aggregated data into table2.

    Insert:
    INSERT INTO table2
    SELECT key1, ...,
           SUM(nonkey_col) OVER (PARTITION BY key1, key2, key3)
    FROM table1 WHERE (key1, key2, key3) IN
      (SELECT key1, key2, key3 FROM table2
       MINUS
       SELECT key1, key2, key3 FROM table1);


    Update:
    FOR i IN (SELECT key1, ...,
                     SUM(nonkey_col) OVER (PARTITION BY key1, key2, key3)
              FROM table1 WHERE (key1, key2, key3, col1) IN
                (SELECT key1, key2, key3, col1 FROM table2
                 MINUS
                 SELECT key1, key2, key3, col1 FROM table1)
             )
    LOOP
      UPDATE table2 ...;
    END LOOP;






    The data loads properly - IF it is a full load. As the size of the file grows, long operations show up in Toad/Session Browser - the sorting becomes huge.

    How can I improve the speed and make this better?

    You could consider using a single MERGE command instead of your separate insert and update loop if you're already on 9i, as sketched below.
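    For illustration, a minimal sketch of that MERGE against table1, assuming the key/non-key column names from the example above (col1..col3 stand in for the real columns):

    -- Hypothetical sketch: one MERGE replaces both the INSERT ... MINUS
    -- statement and the row-by-row update loop.
    MERGE INTO table1 t
    USING insert_table s
    ON (    t.key1 = s.key1 AND t.key2 = s.key2 AND t.key3 = s.key3
        AND t.key4 = s.key4 AND t.key5 = s.key5)
    WHEN MATCHED THEN UPDATE SET
         t.col1 = s.col1,
         t.col2 = s.col2,
         t.col3 = s.col3
    WHEN NOT MATCHED THEN INSERT
         (key1, key2, key3, key4, key5, col1, col2, col3)
         VALUES (s.key1, s.key2, s.key3, s.key4, s.key5, s.col1, s.col2, s.col3);

    A single MERGE lets the database do one set-based join instead of the repeated MINUS scans and per-row updates, which is usually where the sort time goes.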

    In addition, you should consider gathering at least basic statistics on insert_table using DBMS_STATS.GATHER_TABLE_STATS.

    Kind regards
    Randolf

    Oracle related blog stuff:
    http://Oracle-Randolf.blogspot.com/

    SQLTools ++ for Oracle (Open source Oracle GUI for Windows):
    http://www.sqltools-plusplus.org:7676/
    http://sourceforge.NET/projects/SQLT-pp/

  • 6210XS SQL Performance Benchmarking

    Our company has recently acquired some new storage arrays for a new ERP system. I am the senior analyst programmer on the project, and I'm at a beginner-to-intermediate level on SAN storage, virtualization, and SQL performance tuning. I need to get up to speed on what to expect from this new equipment and on best practices for testing and managing it. Our current ERP runs on HP-UX and Informix, which is alien technology relative to where we are going.

    We have a network services division, which is responsible for managing the non-ERP side of the house with ESX and EqualLogic 6500. This team is more knowledgeable in the general management of this equipment, but has less time to devote to this new ERP project, so I am stepping in to help everyone gain confidence and to train and educate myself on it. Phew. Now to get to the meat.

    Setup: dedicated 10 GbE iSCSI network with jumbo frames enabled. No MPIO set up. Dedicated storage pools for the 6210XS, the 6210 (10K SAS), and the 6510 (7200 RPM). Everything on 10 GbE.

    I'm using a tool called MS SQLIO to test the IOs/sec of the 6210XS. I used one of the default test examples from the "Using SQLIO" doc.

    In brief: a 6-minute test, sequential I/O, 2 outstanding requests, a 256 KB I/O size, and a 15 GB test file. The results were:

    H:\SQLIO>sqlio -kR -s360 -fsequential -o2 -b256 -LS -Fparam.txt
    SQLIO v1.5.SG
    using system counter for latency timings, 2343750 counts per second
    parameter file used: param.txt
    file h:\testfile.dat with 16 threads (0-15) using mask 0x0 (0)
    16 threads reading for 360 secs from file h:\testfile.dat
    using 256KB sequential IOs
    enabling multiple I/Os per thread with 2 outstanding
    using specified size: 15000 MB for file: h:\testfile.dat
    initialization done
    CUMULATIVE DATA:
    throughput metrics:
    IOs/sec:   133.93
    MBs/sec:    33.48
    latency metrics:
    Min_Latency(ms): 61
    Avg_Latency(ms): 238
    Max_Latency(ms): 1269

    I ran another test using different settings and got very different results:

    H:\SQLIO>sqlio -kW -s10 -frandom -o8 -b8 -LS -Fparam.txt
    SQLIO v1.5.SG
    using system counter for latency timings, 2343750 counts per second
    parameter file used: param.txt
    file h:\testfile.dat with 8 threads (0-7) using mask 0x0 (0)
    8 threads writing for 10 secs to file h:\testfile.dat
    using 8KB random IOs
    enabling multiple I/Os per thread with 8 outstanding
    using specified size: 102400 MB for file: h:\testfile.dat
    initialization done
    CUMULATIVE DATA:
    throughput metrics:
    IOs/sec: 24122.61
    MBs/sec:   188.45
    latency metrics:
    Min_Latency(ms): 0
    Avg_Latency(ms): 2
    Max_Latency(ms): 25

    Novice question - the first result is obviously not good, and I need to figure out whether my test was configured incorrectly or why the array struggled to perform under those test conditions. Thank you for taking the time to read and respond.

    Usually performance problems are caused by not having the SAN (server, switches, array) set up per best practices, and in some cases by outdated firmware, drivers, and/or equipment.

    With ESX, generally 99% of performance problems are solved with:

    Delayed ACK disabled

    Large Receive Offload (LRO) disabled

    Ensuring VMware Round Robin is used (with IOs per path changed to 3), or using the EQL MEM multipathing module (the most recent version is 1.2)

    If you use multiple VMDKs (or RDMs) in the virtual machine, giving each its own virtual SCSI adapter

    Upgrading to the latest ESX build and applying switch and server updates

    Take a look at the links listed here first.  See also the array firmware release notes.

    Best practices for ESX

    en.Community.Dell.com/.../20434601.aspx

    EqualLogic Configuration Guide

    en.Community.Dell.com/.../2639.EqualLogic-Configuration-Guide.aspx

    Rapid EqualLogic Configuration Portal (a great starting place)

    en.Community.Dell.com/.../3615.Rapid-EqualLogic-configuration-Portal-by-SIS.aspx

    Best practices white papers, look for SQL and ESX

    en.Community.Dell.com/.../2632.Storage-Infrastructure-and-solutions-Team-publications.aspx

    Compatibility matrix

    en.Community.Dell.com/.../20438558

    -Joe

  • Entity control group

    I want to turn on consolidation for data loads from FDMEE to Planning.  When looking in the list of entities, nothing comes back.

    I typed in the parent to consolidate, turned on entity consolidation, and assigned the group to the location.

    When running the data load, I receive green checkmarks for everything, including "Consolidate Planning data", but the overall status is red during the execution process.

    Checking the audit in SmartView, I don't see aggregated data.  The data is loaded, but only after running a manual calc script does the data become consolidated.

    Is there a reason why the target entities do not appear when you navigate to an entity control group?

    Could this be the reason why consolidation fails?

    Hi 983332,

    Has this question been answered?

    If so, please mark the correct answer so that others can see it.

    Thank you

  • Materialized view performance


    Hello Experts!

    I have a requirement to migrate tables to materialized views.

    The current process is to truncate the table and refresh the data, which lives in different tables in another schema or DB, on a monthly basis.

    (The data is loaded with an insert like the example below.)

    insert into emp (field1, field2, ...) select field1, field2, ... from table1, table2 ... with the joins defined.

    If I convert the tables to materialized views, what will the performance impact be?

    With this limited information, I can only comment on a few general things:

    1. With an MV you don't have to worry about maintenance of the table.

    2. Performance will be much better for aggregated data. The cost of re-calculation drops a lot, because it sits in the MV as precomputed data.

    3. You do, however, bear the cost of the insert/update of the MV; be aware that, unlike the manual activity, this is done in real time.
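    For illustration, a minimal sketch of such a conversion for the emp example above (the join condition, column list, and refresh call are assumptions, not from the original post):

    -- Hypothetical sketch: replace the monthly truncate-and-insert job with a
    -- materialized view that precomputes the same join and is refreshed on demand.
    CREATE MATERIALIZED VIEW emp_mv
      BUILD IMMEDIATE
      REFRESH COMPLETE ON DEMAND
    AS
    SELECT t1.field1,
           t2.field2
    FROM   table1 t1
    JOIN   table2 t2 ON t2.emp_id = t1.emp_id;  -- assumed join key

    -- Monthly, instead of TRUNCATE + INSERT:
    -- EXEC DBMS_MVIEW.REFRESH('EMP_MV', 'C');  -- 'C' = complete refresh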

  • vCenter Linked Mode and Perl SDK

    Hello

    our VMware environment is divided between two physical locations represented as two vCenter instances. The two vCenters are interconnected. I think that's called "Linked Mode"?

    Is it possible to use the Perl SDK with Linked Mode as well? Currently, I have to interrogate both of our vCenters separately and then sort the results out myself.

    (1) How do I configure the Perl SDK so that I only authenticate once against our Linked Mode environment?

    (2) How can I build queries against the API so that the results include objects from both vCenters? That is to say, if I run Vim::find_entity_views(view_type => 'VirtualMachine'), I want to get all the 'VirtualMachine' objects from both vCenters. Can I avoid running two separate queries and then combining the query results afterwards?

    Thank you!

    Thomas Willert

    It is not a published API method, but there is a way to get the vCenter server list in a Linked Mode group.  I have an explanation and sample code here: http://www.virtuin.com/2012/12/querying-vcenter-linked-mode-with.html

    With respect to authentication, if your vCenter instances use the same permissions (which is recommended), then it's just a matter of authenticating to each vCenter.  This is what the native client does as well (if you have ever installed vCenters with different permissions, you will see a login prompt for each vCenter).

    If you look at the blog post I linked earlier, you'll see an example of authenticating to each vCenter as well.  Basically you call Util::connect() two times, once for each of your vCenter URLs.  However, do not delete the global VIM instance the SDK package uses, or it will default to the last vCenter.  I show this in the example script: LinkedModeExample.pl

    With respect to combining queries... not so easy.  You need to query each vCenter with $vim->find_entity_views().  On a more advanced note, the API is powerful enough to make custom inventory queries, so you can usually pick up and keep the updates in your objects.

    What I would recommend as a simple approach is two calls to find_entity_views, then pushing the results into a hash or an array.  You'll just want to use the vCenter UUID:

    $vc_uuid = $vim1->get_service_content()->{'about'}->{'instanceUuid'};

    The instanceUuid was added in vCenter 4.x and later versions (vCenter 2.5 instances don't have it).  Then use this instanceUuid + moref as the hash key.  This is important because morefs can be duplicated between vCenters.

    So, you can then create a hash:

    my %vm_hash;

    foreach my $entity (@$vm_views) {

        my $moref = $entity->{'mo_ref'}->{'value'};  # will be vm-# for virtual machines

        $vm_hash{$vc_uuid . "+" . $moref} = $entity;

    }

    Then you can quickly look up specific VMs and keep them separate between vCenters.

    You'll find this works for simple data queries, but as you start to pull more inventory items (say for an inventory report), the relationships become more complex and the number of objects grows.  I have done work pushing these values into a database and files (including running it on 5-second-or-less intervals with WaitForUpdatesEx), even Excel, which works well if you have the time to build the DB and the database object schemas.  Just use the instanceUuid + moref value as the primary key to prevent any cross-vCenter moref reuse.

    The other option: a vim reference is added to each object, so you can just get the data from each entity:

    foreach my $vm (@$vm_views) {

        print "VM: " . $vm->{'name'} . "\n";

        print "VC InstanceUuid: " . $vm->{'vim'}->{'service_content'}->{'about'}->{'instanceUuid'} . "\n";

        print "VC ServiceUrl: " . $vm->{'vim'}->{'service_url'} . "\n";

    }

    I have a project that I tinker with in my spare time (which is rare these days) that aims to consolidate multiple vCenters into one database for queries and reports, but I haven't gotten it far enough along to share it (very rough, but it works for a subset of data at present, and it is very efficient at collecting on <5s intervals).

    You can also look into VMware Orchestrator.  Orchestrator can connect to multiple vCenters and caches data for access by automation.  Depending on your WAN performance, it can work across geographic sites and give you the aggregated data you need (just watch the morefs, entity names, and other potential overlap between vCenters).

  • Query on aggregation views

    Hi all

    I have a question about aggregate views on an ASO cube.

    If I create aggregate views on an ASO cube with existing data, and adjustments are made after the views were created, will retrievals pull the numbers that were aggregated before the adjustments, or will they give the latest adjusted numbers?

    Thank you

    Srini

    You will get the most recent number.  ASO is magical.

    More seriously, Essbase may or may not create additional views on incremental data, but it will do this without any action required from the administrator.  When you query, it may take the original value of your aggregated data and add any changes from the incremental data (with or without additional views) before returning the result.

    If you load non-incrementally, merge your incremental data, or restructure the cube, Essbase recalculates the views, but again it does this "implicitly", without any administrator instructions required.

  • Limited hard drive space

    Hi all

    I have been using Hyperic on an online VPS server (2003) with only 20 GB of space.  Currently Hyperic is installed along with a PostgreSQL database viewer/editor.  So far, I am only monitoring one computer across the net, plus the server agent itself.  I set detailed data to be deleted after 1 day.

    My question is: is there a way to get rid of all data older than 31 days, detailed or not?  Although I am tracking only 2 computers, hard disk space disappears at about 10 MB per day.  With 20-30 to monitor, that could increase to 200-300 MB.

    In addition, can I remove the logs that are collected every day?

    Thanks for your help.

    -Patrick

    On one instance, I have more than 100 agents. This system has worked for months and the mysql db backup file size is around 60 MB.

    Even if the db grew 10 MB per day with one agent, it would not mean that HQ needs 300 MB for a month. When the data becomes old, HQ tries to compact it by removing unnecessary data. The logic behind this is that if you want to see the trend for a year, you really don't need every collected datapoint; you only need aggregated data of the past.

    However, a 10 MB daily increase seems quite high. There may be something wrong with your system. You know, Windows and its "log" files are a funny and really crappy thing. The Windows service log (which HQ tries to read) sometimes gets bloated. You could check how much is being recorded in that service log. That is perhaps the reason why the db size increases... just a guess, though.

  • Why is historicalInterval NULL for ESXi 5.0.0 build-504890 (no "Past day" stats)?

    Hello

    I noticed that for our ESXi hosts (ESXi 5.0.0 build-504890) the PerformanceManager historicalInterval property is not set (i.e. NULL); that is, there are no "Past day" statistics in vSphere client terms.

    But according to the vSphere Web Services SDK Programming Guide :

    ESXi servers also define a single historical interval (PerformanceManager.historicalInterval) that defines the global performance data.  This system-defined performance interval specifies aggregated data collection every 300 seconds for each counter. You cannot change the performance intervals on an ESXi server.

    What is the problem with our hosts?

    P.S. These hosts are managed by vCenter.

    My guess - the parameter is basically ignored with a default installation.  ESXi doesn't keep all the data itself; it only passes its real-time stats up to vCenter.

    A while back, I wrote about extending local performance data on ESXi hosts - http://www.vm-help.com/esx/esx3i/extending_performance_data.php.  With this method, you could get up to 36 hours' worth of locally stored data.  I guess you could do it again with ESXi, in which case the setting would then get used.  Why it is there in the first place, I'm not sure.  I don't remember if early ESX versions were able to store more real-time data locally.

  • Comparison of local SATA VMFS vs SAN SAS VMFS deployment

    Hi all

    According to my understanding, running a virtual machine on VMFS on top of my SAN is supposed to be the industry best practice for VM performance.

    However, with the current setup, I am very confused as to why the performance is slower than on the local hard disk.

    Local HDD: 4 x 500 GB SATA 7200 RPM RAID-5

    C:\SQLTEST>sqlio.exe

    SQLIO v1.5.SG

    1 thread reading for 30 secs from file testfile.dat

    using 2KB IOs over 128KB stripes with 64 IOs per run

    initialization done

    CUMULATIVE DATA:

    throughput metrics:

    IOs/sec: 8826.73

    MBs/sec:   17.23

    While on the SAN HDD: 14 x 300 GB SAS 15000 RPM RAID-5

    C:\SQLTEST>sqlio.exe

    SQLIO v1.5.SG

    1 thread reading for 30 secs from file testfile.dat

    using 2KB IOs over 128KB stripes with 64 IOs per run

    initialization done

    CUMULATIVE DATA:

    throughput metrics:

    IOs/sec: 2314.03

    MBs/sec:    4.51

    Any idea how this could happen, please?

    Kind regards

    AWT

    iSCSI is what it is...   You won't get wow numbers like you do on DAS.

    On the MD3000i?  We found it important to keep really hard-hitting servers / VMs / guests on dedicated LUNs and virtual disks.  The 160 MB iSCSI limit is per target, and each LUN is considered a separate target.  On the dual-controller MD3000i, load balancing is done per virtual disk.   If you match a dedicated LUN to a virtual disk and divide the VDs up between the controllers, server performance seems strong in production.  Still, you won't get the wow numbers in tests that you get on DAS devices.

    If you want really good performance inside the VM?    Use the MS iSCSI initiator; even though we do not like this, tests proved much better IO performance.  In production?  You won't really see much of a difference, in my experience.

Maybe you are looking for

  • WLAN driver for Satellite P10-540

    Hi friends... I use a Satellite P10-540... the problem is the wireless LAN driver. I cannot install it from the CD or from the Toshiba site... when I try to download it, there is a failure... it says: driver was not updated! INF file is invalid or incorrect?

  • No internet after updating 2016-001

    After the recent update (2016-001) for Yosemite 10.10.5 I can't access certain sites. I use a cable connection to the router (192.168.1.1). It says not available. Internet diagnostics says OK. I also have Windows in VirtualBox. From there on, I c

  • Interpolation of XY graph

    Hi all, I'm new to LabVIEW. This is probably a simple question, but I'm really stuck on it. I have an XY graph (thermistor resistance vs. temp; the curve looks like a decay function), and for each unit degree there is a corresponding resista

  • D7360 does not print in "Black Only" unless the color cartridges are also non-empty

    I am trying to print a document in grayscale mode with a Photosmart D7360 printer. In the print setup dialog I explicitly check the option to use "Black Print Cartridge Only". The printer refuses to print, saying: "The following ink cartridges are e

  • File system resource error while doing a search

    Hello, I get a "file system resource" error. I want to search for a string that is present in any of my 100 files already stored on the SD card. But I am only able to search for a string present in the first 30 files, and trying to